Merge remote-tracking branch 'origin' into refactor-cert

Henry Tran 2018-06-21 11:40:49 -04:00
commit 86def984a3
89 changed files with 4420 additions and 1800 deletions


@@ -18,7 +18,7 @@ notifications:
     on_success: never
 go:
-  - 1.10.2
+  - 1.10.3
 go_import_path: k8s.io/ingress-nginx
@@ -40,10 +40,12 @@ jobs:
       script:
         - sudo luarocks install luacheck
         - make luacheck
-        - mkdir --parents $GOPATH/src/golang.org/x
-          && git clone --depth=1 https://go.googlesource.com/lint $GOPATH/src/golang.org/x/lint
-          && go get golang.org/x/lint/golint
-        - go get github.com/vbatts/git-validation
+        - |
+          go get -d golang.org/x/lint/golint
+          cd $GOPATH/src/golang.org/x/tools
+          git checkout release-branch.go1.10
+          go install golang.org/x/lint/golint
+          cd -
         - make verify-all
     - stage: Lua Unit Test
       before_script:

Gopkg.lock (generated)

@@ -207,6 +207,12 @@
   packages = ["."]
   revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062"

+[[projects]]
+  branch = "master"
+  name = "github.com/mitchellh/hashstructure"
+  packages = ["."]
+  revision = "2bca23e0e452137f789efbc8610126fd8b94f73b"
+
 [[projects]]
   branch = "master"
   name = "github.com/mitchellh/mapstructure"
@@ -900,6 +906,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "5feeef324f0cbac72e0234d5f649fc7c4233f4e2bb4477e454e047b5461d7569"
+  inputs-digest = "56ef61f651cca98e6dc7f7d25fd8dec603be3439bf91ba2e19838c5be1cbeea4"
   solver-name = "gps-cdcl"
   solver-version = 1


@@ -59,7 +59,7 @@ IMAGE = $(REGISTRY)/$(IMGNAME)
 MULTI_ARCH_IMG = $(IMAGE)-$(ARCH)

 # Set default base image dynamically for each arch
-BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.49
+BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.52

 ifeq ($(ARCH),arm)
 	QEMUARCH=arm

OWNERS

@@ -4,6 +4,8 @@ approvers:
 - sig-network-leads
 - ingress-nginx-admins
 - ingress-nginx-maintainers
+- ElvinEfendi
+- antoineco

 reviewers:
 - aledbf


@@ -34,12 +34,12 @@ func resetForTesting(usage func()) {
 func TestMandatoryFlag(t *testing.T) {
 	_, _, err := parseFlags()
 	if err == nil {
-		t.Fatalf("expected and error about default backend service")
+		t.Fatalf("Expected an error about default backend service")
 	}
 }

 func TestDefaults(t *testing.T) {
-	resetForTesting(func() { t.Fatal("bad parse") })
+	resetForTesting(func() { t.Fatal("Parsing failed") })

 	oldArgs := os.Args
 	defer func() { os.Args = oldArgs }()
@@ -47,15 +47,15 @@ func TestDefaults(t *testing.T) {
 	showVersion, conf, err := parseFlags()
 	if err != nil {
-		t.Fatalf("unexpected error parsing default flags: %v", err)
+		t.Fatalf("Unexpected error parsing default flags: %v", err)
 	}

 	if showVersion {
-		t.Fatal("expected false but true was returned for flag show-version")
+		t.Fatal("Expected flag \"show-version\" to be false")
 	}

 	if conf == nil {
-		t.Fatal("expected a configuration but nil returned")
+		t.Fatal("Expected a controller Configuration")
 	}
 }


@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"os"
 	"runtime"
-	"time"

 	"github.com/golang/glog"
 	"github.com/spf13/pflag"
@@ -39,101 +38,121 @@ func parseFlags() (bool, *controller.Configuration, error) {
 	var (
 		flags = pflag.NewFlagSet("", pflag.ExitOnError)

-		apiserverHost = flags.String("apiserver-host", "", "The address of the Kubernetes Apiserver "+
-			"to connect to in the format of protocol://address:port, e.g., "+
-			"http://localhost:8080. If not specified, the assumption is that the binary runs inside a "+
-			"Kubernetes cluster and local discovery is attempted.")
-		kubeConfigFile = flags.String("kubeconfig", "", "Path to kubeconfig file with authorization and master location information.")
+		apiserverHost = flags.String("apiserver-host", "",
+			`Address of the Kubernetes API server.
+Takes the form "protocol://address:port". If not specified, it is assumed the
+program runs inside a Kubernetes cluster and local discovery is attempted.`)
+
+		kubeConfigFile = flags.String("kubeconfig", "",
+			`Path to a kubeconfig file containing authorization and API server information.`)

 		defaultSvc = flags.String("default-backend-service", "",
-			`Service used to serve a 404 page for the default backend. Takes the form
-		namespace/name. The controller uses the first node port of this Service for
-		the default backend.`)
+			`Service used to serve HTTP requests not matching any known server name (catch-all).
+Takes the form "namespace/name". The controller configures NGINX to forward
+requests to the first port of this Service.`)

 		ingressClass = flags.String("ingress-class", "",
-			`Name of the ingress class to route through this controller.`)
+			`Name of the ingress class this controller satisfies.
+The class of an Ingress object is set using the annotation "kubernetes.io/ingress.class".
+All ingress classes are satisfied if this parameter is left empty.`)

 		configMap = flags.String("configmap", "",
-			`Name of the ConfigMap that contains the custom configuration to use`)
+			`Name of the ConfigMap containing custom global configurations for the controller.`)

 		publishSvc = flags.String("publish-service", "",
-			`Service fronting the ingress controllers. Takes the form namespace/name.
-		The controller will set the endpoint records on the ingress objects to reflect those on the service.`)
+			`Service fronting the Ingress controller.
+Takes the form "namespace/name". When used together with update-status, the
+controller mirrors the address of this service's endpoints to the load-balancer
+status of all Ingress objects it satisfies.`)

 		tcpConfigMapName = flags.String("tcp-services-configmap", "",
-			`Name of the ConfigMap that contains the definition of the TCP services to expose.
-		The key in the map indicates the external port to be used. The value is the name of the
-		service with the format namespace/serviceName and the port of the service could be a
-		number of the name of the port.
-		The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend`)
+			`Name of the ConfigMap containing the definition of the TCP services to expose.
+The key in the map indicates the external port to be used. The value is a
+reference to a Service in the form "namespace/name:port", where "port" can
+either be a port number or name. TCP ports 80 and 443 are reserved by the
+controller for servicing HTTP traffic.`)

 		udpConfigMapName = flags.String("udp-services-configmap", "",
-			`Name of the ConfigMap that contains the definition of the UDP services to expose.
-		The key in the map indicates the external port to be used. The value is the name of the
-		service with the format namespace/serviceName and the port of the service could be a
-		number of the name of the port.`)
+			`Name of the ConfigMap containing the definition of the UDP services to expose.
+The key in the map indicates the external port to be used. The value is a
+reference to a Service in the form "namespace/name:port", where "port" can
+either be a port name or number.`)

-		resyncPeriod = flags.Duration("sync-period", 600*time.Second,
-			`Relist and confirm cloud resources this often. Default is 10 minutes`)
+		resyncPeriod = flags.Duration("sync-period", 0,
+			`Period at which the controller forces the repopulation of its local object stores. Disabled by default.`)

 		watchNamespace = flags.String("watch-namespace", apiv1.NamespaceAll,
-			`Namespace to watch for Ingress. Default is to watch all namespaces`)
+			`Namespace the controller watches for updates to Kubernetes objects.
+This includes Ingresses, Services and all configuration resources. All
+namespaces are watched if this parameter is left empty.`)

-		profiling = flags.Bool("profiling", true, `Enable profiling via web interface host:port/debug/pprof/`)
+		profiling = flags.Bool("profiling", true,
+			`Enable profiling via web interface host:port/debug/pprof/`)

-		defSSLCertificate = flags.String("default-ssl-certificate", "", `Name of the secret
-		that contains a SSL certificate to be used as default for a HTTPS catch-all server.
-		Takes the form <namespace>/<secret name>.`)
+		defSSLCertificate = flags.String("default-ssl-certificate", "",
+			`Secret containing a SSL certificate to be used by the default HTTPS server (catch-all).
+Takes the form "namespace/name".`)

-		defHealthzURL = flags.String("health-check-path", "/healthz", `Defines
-		the URL to be used as health check inside in the default server in NGINX.`)
+		defHealthzURL = flags.String("health-check-path", "/healthz",
+			`URL path of the health check endpoint.
+Configured inside the NGINX status server. All requests received on the port
+defined by the healthz-port parameter are forwarded internally to this path.`)

-		updateStatus = flags.Bool("update-status", true, `Indicates if the
-		ingress controller should update the Ingress status IP/hostname. Default is true`)
+		updateStatus = flags.Bool("update-status", true,
+			`Update the load-balancer status of Ingress objects this controller satisfies.
+Requires setting the publish-service parameter to a valid Service reference.`)

-		electionID = flags.String("election-id", "ingress-controller-leader", `Election id to use for status update.`)
+		electionID = flags.String("election-id", "ingress-controller-leader",
+			`Election id to use for Ingress status updates.`)

 		forceIsolation = flags.Bool("force-namespace-isolation", false,
-			`Force namespace isolation. This flag is required to avoid the reference of secrets or
-		configmaps located in a different namespace than the specified in the flag --watch-namespace.`)
+			`Force namespace isolation.
+Prevents Ingress objects from referencing Secrets and ConfigMaps located in a
+different namespace than their own. May be used together with watch-namespace.`)

-		updateStatusOnShutdown = flags.Bool("update-status-on-shutdown", true, `Indicates if the
-		ingress controller should update the Ingress status IP/hostname when the controller
-		is being stopped. Default is true`)
+		updateStatusOnShutdown = flags.Bool("update-status-on-shutdown", true,
+			`Update the load-balancer status of Ingress objects when the controller shuts down.
+Requires the update-status parameter.`)

-		sortBackends = flags.Bool("sort-backends", false, `Defines if servers inside NGINX upstream should be sorted`)
+		sortBackends = flags.Bool("sort-backends", false,
+			`Sort servers inside NGINX upstreams.`)

 		useNodeInternalIP = flags.Bool("report-node-internal-ip-address", false,
-			`Defines if the nodes IP address to be returned in the ingress status should be the internal instead of the external IP address`)
+			`Set the load-balancer status of Ingress objects to internal Node addresses instead of external.
+Requires the update-status parameter.`)

 		showVersion = flags.Bool("version", false,
-			`Shows release information about the NGINX Ingress controller`)
+			`Show release information about the NGINX Ingress controller and exit.`)

-		enableSSLPassthrough = flags.Bool("enable-ssl-passthrough", false, `Enable SSL passthrough feature. Default is disabled`)
+		enableSSLPassthrough = flags.Bool("enable-ssl-passthrough", false,
+			`Enable SSL Passthrough.`)

-		httpPort      = flags.Int("http-port", 80, `Indicates the port to use for HTTP traffic`)
-		httpsPort     = flags.Int("https-port", 443, `Indicates the port to use for HTTPS traffic`)
-		statusPort    = flags.Int("status-port", 18080, `Indicates the TCP port to use for exposing the nginx status page`)
-		sslProxyPort  = flags.Int("ssl-passtrough-proxy-port", 442, `Default port to use internally for SSL when SSL Passthgough is enabled`)
-		defServerPort = flags.Int("default-server-port", 8181, `Default port to use for exposing the default server (catch all)`)
-		healthzPort   = flags.Int("healthz-port", 10254, "port for healthz endpoint.")
-
-		annotationsPrefix = flags.String("annotations-prefix", "nginx.ingress.kubernetes.io", `Prefix of the ingress annotations.`)
+		annotationsPrefix = flags.String("annotations-prefix", "nginx.ingress.kubernetes.io",
+			`Prefix of the Ingress annotations specific to the NGINX controller.`)

 		enableSSLChainCompletion = flags.Bool("enable-ssl-chain-completion", true,
-			`Defines if the nginx ingress controller should check the secrets for missing intermediate CA certificates.
-		If the certificate contain issues chain issues is not possible to enable OCSP.
-		Default is true.`)
+			`Autocomplete SSL certificate chains with missing intermediate CA certificates.
+A valid certificate chain is required to enable OCSP stapling. Certificates
+uploaded to Kubernetes must have the "Authority Information Access" X.509 v3
+extension for this to succeed.`)

 		syncRateLimit = flags.Float32("sync-rate-limit", 0.3,
 			`Define the sync frequency upper limit`)

 		publishStatusAddress = flags.String("publish-status-address", "",
-			`User customized address to be set in the status of ingress resources. The controller will set the
-		endpoint records on the ingress using this address.`)
+			`Customized address to set as the load-balancer status of Ingress objects this controller satisfies.
+Requires the update-status parameter.`)

 		dynamicConfigurationEnabled = flags.Bool("enable-dynamic-configuration", false,
-			`When enabled controller will try to avoid Nginx reloads as much as possible by using Lua. Disabled by default.`)
+			`Dynamically refresh backends on topology changes instead of reloading NGINX.
+Feature backed by OpenResty Lua libraries.`)
+
+		httpPort      = flags.Int("http-port", 80, `Port to use for servicing HTTP traffic.`)
+		httpsPort     = flags.Int("https-port", 443, `Port to use for servicing HTTPS traffic.`)
+		statusPort    = flags.Int("status-port", 18080, `Port to use for exposing NGINX status pages.`)
+		sslProxyPort  = flags.Int("ssl-passtrough-proxy-port", 442, `Port to use internally for SSL Passthgough.`)
+		defServerPort = flags.Int("default-server-port", 8181, `Port to use for exposing the default server (catch-all).`)
+		healthzPort   = flags.Int("healthz-port", 10254, "Port to use for the healthz endpoint.")
 	)

 	flag.Set("logtostderr", "true")
@@ -158,10 +177,10 @@ func parseFlags() (bool, *controller.Configuration, error) {
 	}

 	if *ingressClass != "" {
-		glog.Infof("Watching for ingress class: %s", *ingressClass)
+		glog.Infof("Watching for Ingress class: %s", *ingressClass)

 		if *ingressClass != class.DefaultClass {
-			glog.Warningf("only Ingress with class \"%v\" will be processed by this ingress controller", *ingressClass)
+			glog.Warningf("Only Ingresses with class %q will be processed by this ingress controller", *ingressClass)
 		}

 		class.IngressClass = *ingressClass
@@ -191,7 +210,7 @@ func parseFlags() (bool, *controller.Configuration, error) {
 	}

 	if !*enableSSLChainCompletion {
-		glog.Warningf("Check of SSL certificate chain is disabled (--enable-ssl-chain-completion=false)")
+		glog.Warningf("SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)")
 	}

 	// LuaJIT is not available on arch s390x and ppc64le
@@ -200,7 +219,7 @@ func parseFlags() (bool, *controller.Configuration, error) {
 		disableLua = true
 		if *dynamicConfigurationEnabled {
 			*dynamicConfigurationEnabled = false
-			glog.Warningf("Disabling dynamic configuration feature (LuaJIT is not available in s390x and ppc64le)")
+			glog.Warningf("LuaJIT is not available on s390x and ppc64le architectures: disabling dynamic configuration feature.")
 		}
 	}

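As context for the rewritten flag block above: the controller declares its CLI with github.com/spf13/pflag, and this commit moves every help text into a multi-line Go raw string. A minimal, self-contained sketch of that pattern follows; the flag names here are invented for illustration and are not part of the controller's real flag set.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	// A dedicated FlagSet, as in parseFlags(); ExitOnError prints usage and exits on bad input.
	flags := pflag.NewFlagSet("", pflag.ExitOnError)

	// Multi-line help text in a Go raw string, the style adopted by this commit.
	backend := flags.String("example-backend", "",
		`Service used to serve requests not matching any known server name (catch-all).
Takes the form "namespace/name".`)

	showVersion := flags.Bool("version", false,
		`Show release information and exit.`)

	if err := flags.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Printf("backend=%q show-version=%v\n", *backend, *showVersion)
}
```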

@@ -39,12 +39,25 @@ import (
 	"k8s.io/client-go/tools/clientcmd"

 	"k8s.io/ingress-nginx/internal/file"
+	"k8s.io/ingress-nginx/internal/ingress/annotations/class"
 	"k8s.io/ingress-nginx/internal/ingress/controller"
+	"k8s.io/ingress-nginx/internal/ingress/metric/collector"
 	"k8s.io/ingress-nginx/internal/k8s"
 	"k8s.io/ingress-nginx/internal/net/ssl"
 	"k8s.io/ingress-nginx/version"
 )

+const (
+	// High enough QPS to fit all expected use cases. QPS=0 is not set here, because
+	// client code is overriding it.
+	defaultQPS = 1e6
+	// High enough Burst to fit all expected use cases. Burst=0 is not set here, because
+	// client code is overriding it.
+	defaultBurst = 1e6
+
+	fakeCertificate = "default-fake-certificate"
+)
+
 func main() {
 	rand.Seed(time.Now().UnixNano())
@@ -71,36 +84,33 @@ func main() {
 		handleFatalInitError(err)
 	}

-	ns, name, err := k8s.ParseNameNS(conf.DefaultService)
+	defSvcNs, defSvcName, err := k8s.ParseNameNS(conf.DefaultService)
 	if err != nil {
 		glog.Fatal(err)
 	}

-	_, err = kubeClient.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
+	_, err = kubeClient.CoreV1().Services(defSvcNs).Get(defSvcName, metav1.GetOptions{})
 	if err != nil {
+		// TODO (antoineco): compare with error types from k8s.io/apimachinery/pkg/api/errors
 		if strings.Contains(err.Error(), "cannot get services in the namespace") {
-			glog.Fatalf("✖ It seems the cluster it is running with Authorization enabled (like RBAC) and there is no permissions for the ingress controller. Please check the configuration")
+			glog.Fatalf("✖ The cluster seems to be running with a restrictive Authorization mode and the Ingress controller does not have the required permissions to operate normally.")
 		}
-		glog.Fatalf("no service with name %v found: %v", conf.DefaultService, err)
+		glog.Fatalf("No service with name %v found: %v", conf.DefaultService, err)
 	}
-	glog.Infof("validated %v as the default backend", conf.DefaultService)
+	glog.Infof("Validated %v as the default backend.", conf.DefaultService)

 	if conf.Namespace != "" {
 		_, err = kubeClient.CoreV1().Namespaces().Get(conf.Namespace, metav1.GetOptions{})
 		if err != nil {
-			glog.Fatalf("no namespace with name %v found: %v", conf.Namespace, err)
+			glog.Fatalf("No namespace with name %v found: %v", conf.Namespace, err)
 		}
 	}

-	if conf.ResyncPeriod.Seconds() < 10 {
-		glog.Fatalf("resync period (%vs) is too low", conf.ResyncPeriod.Seconds())
-	}
-
 	// create the default SSL certificate (dummy)
 	defCert, defKey := ssl.GetFakeSSLCert()
 	c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}, fs)
 	if err != nil {
-		glog.Fatalf("Error generating self signed certificate: %v", err)
+		glog.Fatalf("Error generating self-signed certificate: %v", err)
 	}

 	conf.FakeCertificatePath = c.PemFileName
@@ -117,6 +127,17 @@ func main() {
 	mux := http.NewServeMux()
 	go registerHandlers(conf.EnableProfiling, conf.ListenPorts.Health, ngx, mux)

+	err = collector.InitNGINXStatusCollector(conf.Namespace, class.IngressClass, conf.ListenPorts.Status)
+	if err != nil {
+		glog.Fatalf("Error creating metric collector: %v", err)
+	}
+
+	err = collector.NewInstance(conf.Namespace, class.IngressClass)
+	if err != nil {
+		glog.Fatalf("Error creating unix socket server: %v", err)
+	}
+
 	ngx.Start()
 }
@@ -130,24 +151,26 @@ func handleSigterm(ngx *controller.NGINXController, exit exiter) {
 	exitCode := 0
 	if err := ngx.Stop(); err != nil {
-		glog.Infof("Error during shutdown %v", err)
+		glog.Infof("Error during shutdown: %v", err)
 		exitCode = 1
 	}

-	glog.Infof("Handled quit, awaiting pod deletion")
+	glog.Infof("Handled quit, awaiting Pod deletion")
 	time.Sleep(10 * time.Second)

 	glog.Infof("Exiting with %v", exitCode)
 	exit(exitCode)
 }

-// createApiserverClient creates new Kubernetes Apiserver client. When kubeconfig or apiserverHost param is empty
-// the function assumes that it is running inside a Kubernetes cluster and attempts to
-// discover the Apiserver. Otherwise, it connects to the Apiserver specified.
-//
-// apiserverHost param is in the format of protocol://address:port/pathPrefix, e.g.http://localhost:8001.
-// kubeConfig location of kubeconfig file
-func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes.Clientset, error) {
+// createApiserverClient creates a new Kubernetes REST client. apiserverHost is
+// the URL of the API server in the format protocol://address:port/pathPrefix,
+// kubeConfig is the location of a kubeconfig file. If defined, the kubeconfig
+// file is loaded first, the URL of the API server read from the file is then
+// optionally overriden by the value of apiserverHost.
+// If neither apiserverHost or kubeconfig are passed in, we assume the
+// controller runs inside Kubernetes and fallback to the in-cluster config. If
+// the in-cluster config is missing or fails, we fallback to the default config.
+func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Clientset, error) {
 	cfg, err := clientcmd.BuildConfigFromFlags(apiserverHost, kubeConfig)
 	if err != nil {
 		return nil, err
@@ -166,7 +189,7 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes
 	var v *discovery.Info

-	// In some environments is possible the client cannot connect the API server in the first request
+	// The client may fail to connect to the API server in the first request.
 	// https://github.com/kubernetes/ingress-nginx/issues/1968
 	defaultRetry := wait.Backoff{
 		Steps:    10,
@@ -177,7 +200,7 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes
 	var lastErr error
 	retries := 0
-	glog.V(2).Info("trying to discover Kubernetes version")
+	glog.V(2).Info("Trying to discover Kubernetes version")
 	err = wait.ExponentialBackoff(defaultRetry, func() (bool, error) {
 		v, err = client.Discovery().ServerVersion()
@@ -186,48 +209,35 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes
 		}

 		lastErr = err
-		glog.V(2).Infof("unexpected error discovering Kubernetes version (attempt %v): %v", err, retries)
+		glog.V(2).Infof("Unexpected error discovering Kubernetes version (attempt %v): %v", err, retries)
 		retries++
 		return false, nil
 	})

-	// err is not null only if there was a timeout in the exponential backoff (ErrWaitTimeout)
+	// err is returned in case of timeout in the exponential backoff (ErrWaitTimeout)
 	if err != nil {
 		return nil, lastErr
 	}

 	// this should not happen, warn the user
 	if retries > 0 {
-		glog.Warningf("it was required to retry %v times before reaching the API server", retries)
+		glog.Warningf("Initial connection to the Kubernetes API server was retried %d times.", retries)
 	}

-	glog.Infof("Running in Kubernetes Cluster version v%v.%v (%v) - git (%v) commit %v - platform %v",
+	glog.Infof("Running in Kubernetes cluster version v%v.%v (%v) - git (%v) commit %v - platform %v",
 		v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform)

 	return client, nil
 }

-const (
-	// High enough QPS to fit all expected use cases. QPS=0 is not set here, because
-	// client code is overriding it.
-	defaultQPS = 1e6
-	// High enough Burst to fit all expected use cases. Burst=0 is not set here, because
-	// client code is overriding it.
-	defaultBurst = 1e6
-
-	fakeCertificate = "default-fake-certificate"
-)
-
-/**
- * Handles fatal init error that prevents server from doing any work. Prints verbose error
- * messages and quits the server.
- */
+// Handler for fatal init errors. Prints a verbose error message and exits.
 func handleFatalInitError(err error) {
-	glog.Fatalf("Error while initializing connection to Kubernetes apiserver. "+
-		"This most likely means that the cluster is misconfigured (e.g., it has "+
-		"invalid apiserver certificates or service accounts configuration). Reason: %s\n"+
+	glog.Fatalf("Error while initiating a connection to the Kubernetes API server. "+
+		"This could mean the cluster is misconfigured (e.g. it has invalid API server certificates "+
+		"or Service Accounts configuration). Reason: %s\n"+
 		"Refer to the troubleshooting guide for more information: "+
-		"https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md", err)
+		"https://kubernetes.github.io/ingress-nginx/troubleshooting/",
+		err)
 }

 func registerHandlers(enableProfiling bool, port int, ic *controller.NGINXController, mux *http.ServeMux) {
@@ -248,7 +258,7 @@ func registerHandlers(enableProfiling bool, port int, ic *controller.NGINXContro
 	mux.HandleFunc("/stop", func(w http.ResponseWriter, r *http.Request) {
 		err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
 		if err != nil {
-			glog.Errorf("unexpected error: %v", err)
+			glog.Errorf("Unexpected error: %v", err)
 		}
 	})

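The discovery retry in createApiserverClient above is built on wait.ExponentialBackoff from k8s.io/apimachinery. The standalone sketch below illustrates how that primitive behaves; Steps: 10 matches the diff, but the remaining backoff values and the fake "connection refused" condition are assumptions for the example, not the controller's actual configuration.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Bounded retries with exponentially growing delays.
	backoff := wait.Backoff{
		Steps:    10,
		Duration: 1 * time.Second,
		Factor:   1.5,
		Jitter:   0.1,
	}

	attempts := 0
	var lastErr error

	// The condition func returns (done, err): (false, nil) requests another
	// attempt, a non-nil error aborts immediately.
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		if attempts < 3 { // pretend the API server is unreachable at first
			lastErr = errors.New("connection refused")
			return false, nil
		}
		return true, nil
	})

	// err is wait.ErrWaitTimeout when every step was exhausted without success.
	if err != nil {
		fmt.Println("gave up:", lastErr)
		return
	}
	fmt.Printf("succeeded after %d attempts\n", attempts)
}
```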

@@ -33,15 +33,15 @@ func TestCreateApiserverClient(t *testing.T) {
 	cli, err := createApiserverClient("", kubeConfigFile)
 	if err != nil {
-		t.Fatalf("unexpected error creating api server client: %v", err)
+		t.Fatalf("Unexpected error creating Kubernetes REST client: %v", err)
 	}
 	if cli == nil {
-		t.Fatalf("expected a kubernetes client but none returned")
+		t.Fatal("Expected a REST client but none returned.")
 	}

 	_, err = createApiserverClient("", "")
 	if err == nil {
-		t.Fatalf("expected an error creating api server client without an api server URL or kubeconfig file")
+		t.Fatal("Expected an error creating REST client without an API server URL or kubeconfig file.")
 	}
 }
@@ -51,7 +51,7 @@ func TestHandleSigterm(t *testing.T) {
 	cli, err := createApiserverClient("", kubeConfigFile)
 	if err != nil {
-		t.Fatalf("unexpected error creating api server client: %v", err)
+		t.Fatalf("Unexpected error creating Kubernetes REST client: %v", err)
 	}

 	resetForTesting(func() { t.Fatal("bad parse") })
@@ -67,20 +67,20 @@ func TestHandleSigterm(t *testing.T) {
 	_, conf, err := parseFlags()
 	if err != nil {
-		t.Errorf("unexpected error creating NGINX controller: %v", err)
+		t.Errorf("Unexpected error creating NGINX controller: %v", err)
 	}
 	conf.Client = cli

 	fs, err := file.NewFakeFS()
 	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
+		t.Fatalf("Unexpected error: %v", err)
 	}

 	ngx := controller.NewNGINXController(conf, fs)

 	go handleSigterm(ngx, func(code int) {
 		if code != 1 {
-			t.Errorf("expected exit code 1 but %v received", code)
+			t.Errorf("Expected exit code 1 but %d received", code)
 		}

 		return
@@ -88,12 +88,13 @@ func TestHandleSigterm(t *testing.T) {
 	time.Sleep(1 * time.Second)

-	t.Logf("sending SIGTERM to process PID %v", syscall.Getpid())
+	t.Logf("Sending SIGTERM to PID %d", syscall.Getpid())
 	err = syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
 	if err != nil {
-		t.Errorf("unexpected error sending SIGTERM signal")
+		t.Error("Unexpected error sending SIGTERM signal.")
 	}
 }

 func TestRegisterHandlers(t *testing.T) {
+	// TODO
 }

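TestHandleSigterm above delivers a real SIGTERM to the test process and asserts on the exit code. For readers unfamiliar with the underlying idiom, here is a minimal, hedged sketch of SIGTERM handling with os/signal; it is not the controller's handleSigterm, only the general pattern.

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGTERM)

	fmt.Println("waiting for SIGTERM; send one with: kill -TERM", os.Getpid())
	<-sigCh

	// Mimic a graceful shutdown: stop serving, wait so upstream load
	// balancers notice, then exit with a status code.
	fmt.Println("received SIGTERM, shutting down")
	time.Sleep(1 * time.Second)
	os.Exit(0)
}
```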

@@ -28,7 +28,7 @@ spec:
           value: <Client ID>
         - name: OAUTH2_PROXY_CLIENT_SECRET
           value: <Client Secret>
-        # python -c 'import os,base64; print base64.b64encode(os.urandom(16))'
+        # docker run -ti --rm python:3-alpine python -c 'import secrets,base64; print(base64.b64encode(base64.b64encode(secrets.token_bytes(16))));'
         - name: OAUTH2_PROXY_COOKIE_SECRET
           value: SECRET
         image: docker.io/colemickens/oauth2_proxy:latest

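The manifest comment above generates OAUTH2_PROXY_COOKIE_SECRET with a Python one-liner. An equivalent throwaway generator in Go is sketched below; whether the secret should be 16, 24 or 32 bytes, and whether it must be base64-encoded once or twice, depends on the oauth2_proxy version, so treat those choices as assumptions.

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// 16 random bytes, base64-encoded, mirroring the one-liner in the manifest.
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(b))
}
```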

@@ -1,82 +1,83 @@
 # Custom Errors

-This example shows how is possible to use a custom backend to render custom error pages. The code of this example is located here [custom-error-pages](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-errors)
-
-The idea is to use the headers `X-Code` and `X-Format` that NGINX pass to the backend in case of an error to find out the best existent representation of the response to be returned. i.e. if the request contains an `Accept` header of type `json` the error should be in that format and not in `html` (the default in NGINX).
-
-First create the custom backend to use in the Ingress controller
-
-```
-$ kubectl create -f custom-default-backend.yaml
-service "nginx-errors" created
-replicationcontroller "nginx-errors" created
-```
-
-```
-$ kubectl get svc
-NAME           CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
-echoheaders    10.3.0.7     nodes         80/TCP    23d
-kubernetes     10.3.0.1     <none>        443/TCP   34d
-nginx-errors   10.3.0.102   <none>        80/TCP    11s
-```
-
-```
-$ kubectl get rc
-CONTROLLER     REPLICAS   AGE
-echoheaders    1          19d
-nginx-errors   1          19s
-```
-
-Next create the Ingress controller executing
-```
-$ kubectl create -f rc-custom-errors.yaml
-```
-
-Now to check if this is working we use curl:
-
-```
-$ curl -v http://172.17.4.99/
-* Trying 172.17.4.99...
-* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
-> GET / HTTP/1.1
-> Host: 172.17.4.99
-> User-Agent: curl/7.43.0
-> Accept: */*
->
-< HTTP/1.1 404 Not Found
-< Server: nginx/1.10.0
-< Date: Wed, 04 May 2016 02:53:45 GMT
-< Content-Type: text/html
-< Transfer-Encoding: chunked
-< Connection: keep-alive
-< Vary: Accept-Encoding
-<
-<span>The page you're looking for could not be found.</span>
-* Connection #0 to host 172.17.4.99 left intact
-```
-
-Specifying json as expected format:
-
-```
-$ curl -v http://172.17.4.99/ -H 'Accept: application/json'
-* Trying 172.17.4.99...
-* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
-> GET / HTTP/1.1
-> Host: 172.17.4.99
-> User-Agent: curl/7.43.0
-> Accept: application/json
->
-< HTTP/1.1 404 Not Found
-< Server: nginx/1.10.0
-< Date: Wed, 04 May 2016 02:54:00 GMT
-< Content-Type: text/html
-< Transfer-Encoding: chunked
-< Connection: keep-alive
-< Vary: Accept-Encoding
-<
-{ "message": "The page you're looking for could not be found" }
-* Connection #0 to host 172.17.4.99 left intact
-```
+This example demonstrates how to use a custom backend to render custom error pages.
+
+## Customized default backend
+
+First, create the custom `default-backend`. It will be used by the Ingress controller later on.
+
+```
+$ kubectl create -f custom-default-backend.yaml
+service "nginx-errors" created
+deployment.apps "nginx-errors" created
+```
+
+This should have created a Deployment and a Service with the name `nginx-errors`.
+
+```
+$ kubectl get deploy,svc
+NAME                           DESIRED   CURRENT   READY   AGE
+deployment.apps/nginx-errors   1         1         1       10s
+
+NAME                   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
+service/nginx-errors   ClusterIP   10.0.0.12    <none>        80/TCP    10s
+```
+
+## Ingress controller configuration
+
+If you do not already have an instance of the the NGINX Ingress controller running, deploy it according to the
+[deployment guide][deploy], then follow these steps:
+
+1. Edit the `nginx-ingress-controller` Deployment and set the value of the `--default-backend` flag to the name of the
+   newly created error backend.
+
+2. Edit the `nginx-configuration` ConfigMap and create the key `custom-http-errors` with a value of `404,503`.
+
+3. Take note of the IP address assigned to the NGINX Ingress controller Service.
+    ```
+    $ kubectl get svc ingress-nginx
+    NAME            TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)          AGE
+    ingress-nginx   ClusterIP   10.0.0.13    <none>        80/TCP,443/TCP   10m
+    ```
+
+!!! Note
+    The `ingress-nginx` Service is of type `ClusterIP` in this example. This may vary depending on your environment.
+    Make sure you can use the Service to reach NGINX before proceeding with the rest of this example.
+
+[deploy]: ../../../deploy/
+
+## Testing error pages
+
+Let us send a couple of HTTP requests using cURL and validate everything is working as expected.
+
+A request to the default backend returns a 404 error with a custom message:
+
+```
+$ curl -D- http://10.0.0.13/
+HTTP/1.1 404 Not Found
+Server: nginx/1.13.12
+Date: Tue, 12 Jun 2018 19:11:24 GMT
+Content-Type: */*
+Transfer-Encoding: chunked
+Connection: keep-alive
+
+<span>The page you're looking for could not be found.</span>
+```
+
+A request with a custom `Accept` header returns the corresponding document type (JSON):
+
+```
+$ curl -D- -H 'Accept: application/json' http://10.0.0.13/
+HTTP/1.1 404 Not Found
+Server: nginx/1.13.12
+Date: Tue, 12 Jun 2018 19:12:36 GMT
+Content-Type: application/json
+Transfer-Encoding: chunked
+Connection: keep-alive
+Vary: Accept-Encoding
+
+{ "message": "The page you're looking for could not be found" }
+```
+
+To go further with this example, feel free to deploy your own applications and Ingress objects, and validate that the
+responses are still in the correct format when a backend returns 503 (eg. if you scale a Deployment down to 0 replica).


@@ -1,3 +1,4 @@
+---
 apiVersion: v1
 kind: Service
 metadata:
@@ -5,27 +6,35 @@ metadata:
   labels:
     app: nginx-errors
 spec:
-  ports:
-  - port: 80
-    targetPort: 80
-    protocol: TCP
-    name: http
   selector:
     app: nginx-errors
+  ports:
+  - port: 80
+    targetPort: 8080
+    name: http
 ---
-apiVersion: v1
-kind: ReplicationController
+apiVersion: apps/v1beta2
+kind: Deployment
 metadata:
   name: nginx-errors
 spec:
   replicas: 1
   selector:
-    app: nginx-errors
+    matchLabels:
+      app: nginx-errors
   template:
     metadata:
       labels:
         app: nginx-errors
     spec:
       containers:
-      - name: nginx-errors
-        image: aledbf/nginx-error-server:0.1
+      - name: nginx-error-server
+        image: quay.io/kubernetes-ingress-controller/custom-error-pages-amd64:0.3
         ports:
-        - containerPort: 80
+        - containerPort: 8080
+        # Setting the environment variable DEBUG we can see the headers sent
+        # by the ingress controller to the backend in the client response.
+        # env:
+        # - name: DEBUG
+        #   value: "true"


@@ -1,53 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-lb
spec:
replicas: 1
selector:
k8s-app: nginx-ingress-lb
template:
metadata:
labels:
k8s-app: nginx-ingress-lb
name: nginx-ingress-lb
spec:
terminationGracePeriodSeconds: 60
containers:
- image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0
name: nginx-ingress-lb
imagePullPolicy: Always
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
# use downward API
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/nginx-errors
securityContext:
runAsNonRoot: false


@@ -42,6 +42,3 @@ $ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf
     }
 ....
 ```
-
-![nginx-module-vts](custom-upstream.png "screenshot with custom configuration")

Binary file deleted (image, 59 KiB) — not shown.


@@ -1,103 +0,0 @@
# Custom VTS metrics with Prometheus
This example aims to demonstrate the deployment of an nginx ingress controller and use a ConfigMap to enable [nginx vts module](https://github.com/vozlt/nginx-module-vts
) to export metrics in prometheus format.
## vts-metrics
Vts-metrics export NGINX metrics. To deploy all the files simply run `kubectl apply -f nginx`. A deployment and service will be
created which already has a `prometheus.io/scrape: 'true'` annotation and if you added
the recommended Prometheus service-endpoint scraping [configuration](https://raw.githubusercontent.com/prometheus/prometheus/master/documentation/examples/prometheus-kubernetes.yml),
Prometheus will scrape it automatically and you start using the generated metrics right away.
## Custom configuration
```console
apiVersion: v1
data:
enable-vts-status: "true"
kind: ConfigMap
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app: ingress-nginx
```
```console
$ kubectl apply -f nginx-vts-metrics-conf.yaml
```
## Result
Check whether the ingress controller successfully generated the NGINX vts status:
```console
$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf|grep vhost_traffic_status_display
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
```
### NGINX vts dashboard
The vts dashboard provides real time metrics.
![vts dashboard](imgs/vts-dashboard.png)
Because the vts port it's not yet exposed, you should forward the controller port to see it.
```console
$ kubectl port-forward $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n ingress-nginx --output=jsonpath={.items..metadata.name}) -n ingress-nginx 18080
```
Now open the url [http://localhost:18080/nginx_status](http://localhost:18080/nginx_status) in your browser.
### Prometheus metrics output
NGINX Ingress controller already has a parser to convert vts metrics to Prometheus format. It exports prometheus metrics to the address `:10254/metrics`.
```console
$ kubectl exec -ti -n ingress-nginx $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) curl localhost:10254/metrics
ingress_controller_ssl_expire_time_seconds{host="foo.bar.com"} -6.21355968e+10
# HELP ingress_controller_success Cumulative number of Ingress controller reload operations
# TYPE ingress_controller_success counter
ingress_controller_success{count="reloads"} 3
# HELP nginx_bytes_total Nginx bytes count
# TYPE nginx_bytes_total counter
nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="*"} 3708
nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="_"} 3708
nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="*"} 5256
nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="_"} 5256
```
### Customize metrics
The default [vts vhost key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key) is `$geoip_country_code country::*` that expose metrics grouped by server and country code. The example below show how to have metrics grouped by server and server path.
![vts dashboard](imgs/vts-dashboard-filter-key-path.png)
## NGINX custom configuration ( http level )
```
apiVersion: v1
kind: ConfigMap
data:
enable-vts-status: "true"
vts-default-filter-key: "$server_name"
...
```
## Customize ingress
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/vts-filter-key: $uri $server_name
name: ingress
```
## Result
![prometheus filter key path](imgs/prometheus-filter-key-path.png)

Three binary files deleted (images, 969 KiB, 451 KiB and 244 KiB) — not shown.


@@ -1,9 +0,0 @@
apiVersion: v1
data:
enable-vts-status: "true"
kind: ConfigMap
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app: ingress-nginx


@@ -13,10 +13,9 @@ Auth | [OAuth external auth](auth/oauth-external-auth/README.md) | TODO | TODO
 Customization | [Configuration snippets](customization/configuration-snippets/README.md) | customize nginx location configuration using annotations | Advanced
 Customization | [Custom configuration](customization/custom-configuration/README.md) | TODO | TODO
 Customization | [Custom DH parameters for perfect forward secrecy](customization/ssl-dh-param/README.md) | TODO | TODO
-Customization | [Custom errors](customization/custom-errors/README.md) | TODO | TODO
+Customization | [Custom errors](customization/custom-errors/README.md) | serve custom error pages from the default backend | Intermediate
 Customization | [Custom headers](customization/custom-headers/README.md) | set custom headers before sending traffic to backends | Advanced
 Customization | [Custom upstream check](customization/custom-upstream-check/README.md) | TODO | TODO
-Customization | [Custom VTS metrics with Prometheus](customization/custom-vts-metrics-prometheus/README.md) | TODO | TODO
 Customization | [External authentication with response header propagation](customization/external-auth-headers/README.md) | TODO | TODO
 Customization | [Sysctl tuning](customization/sysctl/README.md) | TODO | TODO
 Features | [Rewrite](rewrite/README.md) | TODO | TODO


@@ -1,48 +1,48 @@
 # Command line arguments

-The following command line arguments are accepted by the main controller executable.
+The following command line arguments are accepted by the Ingress controller executable.

-They are set in the container spec of the `nginx-ingress-controller` Deployment object (see `deploy/with-rbac.yaml` or `deploy/without-rbac.yaml`).
+They are set in the container spec of the `nginx-ingress-controller` Deployment manifest (see `deploy/with-rbac.yaml` or `deploy/without-rbac.yaml`).

 | Argument | Description |
 |----------|-------------|
-| `--alsologtostderr` | log to standard error as well as files |
-| `--annotations-prefix string` | Prefix of the ingress annotations. (default "nginx.ingress.kubernetes.io") |
-| `--apiserver-host string` | The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted. |
-| `--configmap string` | Name of the ConfigMap that contains the custom configuration to use |
-| `--default-backend-service string` | Service used to serve a 404 page for the default backend. Takes the form namespace/name. The controller uses the first node port of this Service for the default backend. |
-| `--default-server-port int` | Default port to use for exposing the default server (catch all) (default 8181) |
-| `--default-ssl-certificate string` | Name of the secret that contains a SSL certificate to be used as default for a HTTPS catch-all server. Takes the form <namespace>/<secret name>. |
-| `--election-id string` | Election id to use for status update. (default "ingress-controller-leader") |
-| `--enable-dynamic-configuration` | When enabled controller will try to avoid Nginx reloads as much as possible by using Lua. Disabled by default. |
-| `--enable-ssl-chain-completion` | Defines if the nginx ingress controller should check the secrets for missing intermediate CA certificates. If the certificate contain issues chain issues is not possible to enable OCSP. Default is true. (default true) |
-| `--enable-ssl-passthrough` | Enable SSL passthrough feature. Default is disabled |
-| `--force-namespace-isolation` | Force namespace isolation. This flag is required to avoid the reference of secrets or configmaps located in a different namespace than the specified in the flag --watch-namespace. |
-| `--health-check-path string` | Defines the URL to be used as health check inside in the default server in NGINX. (default "/healthz") |
-| `--healthz-port int` | port for healthz endpoint. (default 10254) |
-| `--http-port int` | Indicates the port to use for HTTP traffic (default 80) |
-| `--https-port int` | Indicates the port to use for HTTPS traffic (default 443) |
-| `--ingress-class string` | Name of the ingress class to route through this controller. |
-| `--kubeconfig string` | Path to kubeconfig file with authorization and master location information. |
-| `--log_backtrace_at traceLocation` | when logging hits line file:N, emit a stack trace (default :0) |
-| `--log_dir string` | If non-empty, write log files in this directory |
-| `--logtostderr` | log to standard error instead of files (default true) |
-| `--profiling` | Enable profiling via web interface host:port/debug/pprof/ (default true) |
-| `--publish-service string` | Service fronting the ingress controllers. Takes the form namespace/name. The controller will set the endpoint records on the ingress objects to reflect those on the service. |
-| `--publish-status-address string` | User customized address to be set in the status of ingress resources. The controller will set the endpoint records on the ingress using this address. |
-| `--report-node-internal-ip-address` | Defines if the nodes IP address to be returned in the ingress status should be the internal instead of the external IP address |
-| `--sort-backends` | Defines if backends and its endpoints should be sorted |
-| `--ssl-passtrough-proxy-port int` | Default port to use internally for SSL when SSL Passthgough is enabled (default 442) |
-| `--status-port int` | Indicates the TCP port to use for exposing the nginx status page (default 18080) |
-| `--stderrthreshold severity` | logs at or above this threshold go to stderr (default 2) |
-| `--sync-period duration` | Relist and confirm cloud resources this often. Default is 10 minutes (default 10m0s) |
-| `--sync-rate-limit float32` | Define the sync frequency upper limit (default 0.3) |
-| `--tcp-services-configmap string` | Name of the ConfigMap that contains the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is the name of the service with the format namespace/serviceName and the port of the service could be a number of the name of the port. The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend |
-| `--udp-services-configmap string` | Name of the ConfigMap that contains the definition of the UDP services to expose. The key in the map indicates the external port to be used. The value is the name of the service with the format namespace/serviceName and the port of the service could be a number of the name of the port. |
-| `--update-status` | Indicates if the ingress controller should update the Ingress status IP/hostname. Default is true (default true) |
-| `--update-status-on-shutdown` | Indicates if the ingress controller should update the Ingress status IP/hostname when the controller is being stopped. Default is true (default true) |
-| `-v`, `--v Level` | log level for V logs |
-| `--version` | Shows release information about the NGINX Ingress controller |
-| `--vmodule moduleSpec` | comma-separated list of pattern=N settings for file-filtered logging |
-| `--watch-namespace string` | Namespace to watch for Ingress. Default is to watch all namespaces |
+| --alsologtostderr | log to standard error as well as files |
+| --annotations-prefix string | Prefix of the Ingress annotations specific to the NGINX controller. (default "nginx.ingress.kubernetes.io") |
+| --apiserver-host string | Address of the Kubernetes API server. Takes the form "protocol://address:port". If not specified, it is assumed the program runs inside a Kubernetes cluster and local discovery is attempted. |
+| --configmap string | Name of the ConfigMap containing custom global configurations for the controller. |
+| --default-backend-service string | Service used to serve HTTP requests not matching any known server name (catch-all). Takes the form "namespace/name". The controller configures NGINX to forward requests to the first port of this Service. |
+| --default-server-port int | Port to use for exposing the default server (catch-all). (default 8181) |
+| --default-ssl-certificate string | Secret containing a SSL certificate to be used by the default HTTPS server (catch-all). Takes the form "namespace/name". |
+| --election-id string | Election id to use for Ingress status updates. (default "ingress-controller-leader") |
+| --enable-dynamic-configuration | Dynamically refresh backends on topology changes instead of reloading NGINX. Feature backed by OpenResty Lua libraries. |
+| --enable-ssl-chain-completion | Autocomplete SSL certificate chains with missing intermediate CA certificates. A valid certificate chain is required to enable OCSP stapling. Certificates uploaded to Kubernetes must have the "Authority Information Access" X.509 v3 extension for this to succeed. (default true) |
+| --enable-ssl-passthrough | Enable SSL Passthrough. |
+| --force-namespace-isolation | Force namespace isolation. Prevents Ingress objects from referencing Secrets and ConfigMaps located in a different namespace than their own. May be used together with watch-namespace. |
+| --health-check-path string | URL path of the health check endpoint. Configured inside the NGINX status server. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. (default "/healthz") |
+| --healthz-port int | Port to use for the healthz endpoint. (default 10254) |
+| --http-port int | Port to use for servicing HTTP traffic. (default 80) |
+| --https-port int | Port to use for servicing HTTPS traffic. (default 443) |
+| --ingress-class string | Name of the ingress class this controller satisfies. The class of an Ingress object is set using the annotation "kubernetes.io/ingress.class". All ingress classes are satisfied if this parameter is left empty. |
+| --kubeconfig string | Path to a kubeconfig file containing authorization and API server information. |
+| --log_backtrace_at traceLocation | when logging hits line file:N, emit a stack trace (default :0) |
+| --log_dir string | If non-empty, write log files in this directory |
+| --logtostderr | log to standard error instead of files (default true) |
+| --profiling | Enable profiling via web interface host:port/debug/pprof/ (default true) |
+| --publish-service string | Service fronting the Ingress controller. Takes the form "namespace/name". When used together with update-status, the controller mirrors the address of this service's endpoints to the load-balancer status of all Ingress objects it satisfies. |
+| --publish-status-address string | Customized address to set as the load-balancer status of Ingress objects this controller satisfies. Requires the update-status parameter. |
+| --report-node-internal-ip-address | Set the load-balancer status of Ingress objects to internal Node addresses instead of external. Requires the update-status parameter. |
+| --sort-backends | Sort servers inside NGINX upstreams. |
+| --ssl-passtrough-proxy-port int | Port to use internally for SSL Passthgough. (default 442) |
+| --status-port int | Port to use for exposing NGINX status pages. (default 18080) |
+| --stderrthreshold severity | logs at or above this threshold go to stderr (default 2) |
+| --sync-period duration | Period at which the controller forces the repopulation of its local object stores. (default 10m0s) |
+| --sync-rate-limit float32 | Define the sync frequency upper limit (default 0.3) |
+| --tcp-services-configmap string | Name of the ConfigMap containing the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form "namespace/name:port", where "port" can either be a port number or name. TCP ports 80 and 443 are reserved by the controller for servicing HTTP traffic. |
+| --udp-services-configmap string | Name of the ConfigMap containing the definition of the UDP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form "namespace/name:port", where "port" can either be a port name or number. |
+| --update-status | Update the load-balancer status of Ingress objects this controller satisfies. Requires setting the publish-service parameter to a valid Service reference. (default true) |
+| --update-status-on-shutdown | Update the load-balancer status of Ingress objects when the controller shuts down. Requires the update-status parameter. (default true) |
+| --v Level | log level for V logs |
+| --version | Show release information about the NGINX Ingress controller and exit. |
+| --vmodule moduleSpec | comma-separated list of pattern=N settings for file-filtered logging |
+| --watch-namespace string | Namespace the controller watches for updates to Kubernetes objects. This includes Ingresses, Services and all configuration resources. All namespaces are watched if this parameter is left empty. |


@ -1,19 +1,30 @@
# Custom errors # Custom errors
In case of an error in a request the body of the response is obtained from the `default backend`. When the [`custom-http-errors`][cm-custom-http-errors] option is enabled, the Ingress controller configures NGINX so
Each request to the default backend includes two headers: that it passes several HTTP headers down to its `default-backend` in case of error:
- `X-Code` indicates the HTTP code to be returned to the client. | Header | Value |
- `X-Format` the value of the `Accept` header. | ---------------- | ------------------------------------------------ |
| `X-Code` | HTTP status code retuned by the request |
| `X-Format` | Value of the `Accept` header sent by the client |
| `X-Original-URI` | URI that caused the error |
| `X-Namespace` | Namespace where the backend Service is located |
| `X-Ingress-Name` | Name of the Ingress where the backend is defined |
| `X-Service-Name` | Name of the Service backing the backend |
| `X-Service-Port` | Port number of the Service backing the backend |
A custom error backend can use this information to return the best possible representation of an error page. For
example, if the value of the `Accept` header sent by the client was `application/json`, a carefully crafted backend
could decide to return the error payload as a JSON document instead of HTML.
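Enabling this behavior amounts to listing the status codes to intercept under the `custom-http-errors` key of the controller ConfigMap. A minimal sketch, with an arbitrary choice of codes and illustrative ConfigMap metadata:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration   # the controller's main ConfigMap; name is illustrative
  namespace: ingress-nginx
data:
  # comma-separated list of HTTP status codes to hand over to the default backend
  custom-http-errors: "404,503"
```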
!!! Important !!! Important
The custom backend must return the correct HTTP status code to be returned. NGINX does not change the response from the custom default backend. The custom backend is expected to return the correct HTTP status code instead of `200`. NGINX does not change
the response from the custom default backend.
Using these two headers it's possible to use a custom backend service like [this one](https://github.com/kubernetes/ingress-nginx/tree/master/images/custom-error-pages) that inspects each request and returns a custom error page with the format expected by the client. Please check the example [custom-errors](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-errors). An example of such a custom backend is available inside the source repository at [images/custom-error-pages][img-custom-error-pages].
NGINX sends additional headers that can be used to build custom response: See also the [Custom errors][example-custom-errors] example.
- X-Original-URI [cm-custom-http-errors]: ./nginx-configuration/configmap.md#custom-http-errors
- X-Namespace [img-custom-error-pages]: https://github.com/kubernetes/ingress-nginx/tree/master/images/custom-error-pages
- X-Ingress-Name [example-custom-errors]: ../examples/customization/custom-errors
- X-Service-Name

View file

@ -44,6 +44,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz
|[nginx.ingress.kubernetes.io/limit-rps](#rate-limiting)|number| |[nginx.ingress.kubernetes.io/limit-rps](#rate-limiting)|number|
|[nginx.ingress.kubernetes.io/permanent-redirect](#permanent-redirect)|string| |[nginx.ingress.kubernetes.io/permanent-redirect](#permanent-redirect)|string|
|[nginx.ingress.kubernetes.io/proxy-body-size](#custom-max-body-size)|string| |[nginx.ingress.kubernetes.io/proxy-body-size](#custom-max-body-size)|string|
|[nginx.ingress.kubernetes.io/proxy-cookie-domain](#proxy-cookie-domain)|string|
|[nginx.ingress.kubernetes.io/proxy-connect-timeout](#custom-timeouts)|number| |[nginx.ingress.kubernetes.io/proxy-connect-timeout](#custom-timeouts)|number|
|[nginx.ingress.kubernetes.io/proxy-send-timeout](#custom-timeouts)|number| |[nginx.ingress.kubernetes.io/proxy-send-timeout](#custom-timeouts)|number|
|[nginx.ingress.kubernetes.io/proxy-read-timeout](#custom-timeouts)|number| |[nginx.ingress.kubernetes.io/proxy-read-timeout](#custom-timeouts)|number|
@ -70,6 +71,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz
|[nginx.ingress.kubernetes.io/upstream-vhost](#custom-nginx-upstream-vhost)|string| |[nginx.ingress.kubernetes.io/upstream-vhost](#custom-nginx-upstream-vhost)|string|
|[nginx.ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR| |[nginx.ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR|
|[nginx.ingress.kubernetes.io/proxy-buffering](#proxy-buffering)|string| |[nginx.ingress.kubernetes.io/proxy-buffering](#proxy-buffering)|string|
|[nginx.ingress.kubernetes.io/proxy-buffer-size](#proxy-buffer-size)|string|
|[nginx.ingress.kubernetes.io/ssl-ciphers](#ssl-ciphers)|string| |[nginx.ingress.kubernetes.io/ssl-ciphers](#ssl-ciphers)|string|
|[nginx.ingress.kubernetes.io/connection-proxy-header](#connection-proxy-header)|string| |[nginx.ingress.kubernetes.io/connection-proxy-header](#connection-proxy-header)|string|
|[nginx.ingress.kubernetes.io/enable-access-log](#enable-access-log)|"true" or "false"| |[nginx.ingress.kubernetes.io/enable-access-log](#enable-access-log)|"true" or "false"|
@ -150,7 +152,7 @@ nginx.ingress.kubernetes.io/auth-realm: "realm string"
NGINX exposes some flags in the [upstream configuration](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that enable the configuration of each server in the upstream. The Ingress controller allows custom `max_fails` and `fail_timeout` parameters in a global context using `upstream-max-fails` and `upstream-fail-timeout` in the NGINX ConfigMap or in a particular Ingress rule. `upstream-max-fails` defaults to 0. This means NGINX will respect the container's `readinessProbe` if it is defined. If there is no probe and no values for `upstream-max-fails` NGINX will continue to send traffic to the container. NGINX exposes some flags in the [upstream configuration](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that enable the configuration of each server in the upstream. The Ingress controller allows custom `max_fails` and `fail_timeout` parameters in a global context using `upstream-max-fails` and `upstream-fail-timeout` in the NGINX ConfigMap or in a particular Ingress rule. `upstream-max-fails` defaults to 0. This means NGINX will respect the container's `readinessProbe` if it is defined. If there is no probe and no values for `upstream-max-fails` NGINX will continue to send traffic to the container.
!!! tip !!! tip
With the default configuration NGINX will not health check your backends. Whenever the endpoints controller notices a readiness probe failure, that pod's IP will be removed from the list of endpoints. This will trigger the NGINX controller to also remove it from the upstreams. With the default configuration NGINX will not health check your backends. Whenever the endpoints controller notices a readiness probe failure, that pod's IP will be removed from the list of endpoints. This will trigger the NGINX controller to also remove it from the upstreams.
To use custom values in an Ingress rule define these annotations: To use custom values in an Ingress rule define these annotations:
@ -208,9 +210,9 @@ The annotations are:
!!! attention !!! attention
TLS with Client Authentication is **not** possible in Cloudflare and might result in unexpected behavior. TLS with Client Authentication is **not** possible in Cloudflare and might result in unexpected behavior.
Cloudflare only allows Authenticated Origin Pulls and requires using their own certificate: [https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/](https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/) Cloudflare only allows Authenticated Origin Pulls and requires using their own certificate: [https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/](https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/)
Only Authenticated Origin Pulls are allowed and can be configured by following their tutorial: [https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls](https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls) Only Authenticated Origin Pulls are allowed and can be configured by following their tutorial: [https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls](https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls)
@ -464,6 +466,12 @@ To use custom values in an Ingress rule define these annotation:
nginx.ingress.kubernetes.io/proxy-body-size: 8m nginx.ingress.kubernetes.io/proxy-body-size: 8m
``` ```
### Proxy cookie domain
Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the "Set-Cookie" header fields of a proxied server response.
To configure this setting globally for all Ingress rules, the `proxy-cookie-domain` value may be set in the [NGINX ConfigMap][configmap].
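To use a custom value in a particular Ingress rule, an annotation along the following lines could be used; the two arguments mirror the nginx `proxy_cookie_domain` directive, and the domains shown are purely illustrative:

```yaml
nginx.ingress.kubernetes.io/proxy-cookie-domain: "localhost example.org"
```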
### Proxy buffering ### Proxy buffering
Enable or disable proxy buffering [`proxy_buffering`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering). Enable or disable proxy buffering [`proxy_buffering`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering).
@ -476,6 +484,16 @@ To use custom values in an Ingress rule define these annotation:
nginx.ingress.kubernetes.io/proxy-buffering: "on" nginx.ingress.kubernetes.io/proxy-buffering: "on"
``` ```
### Proxy buffer size
Sets the size of the buffer [`proxy_buffer_size`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) used for reading the first part of the response received from the proxied server.
By default the proxy buffer size is set to "4k".
To configure this setting globally, set `proxy-buffer-size` in [NGINX ConfigMap][configmap]. To use custom values in an Ingress rule, define this annotation:
```yaml
nginx.ingress.kubernetes.io/proxy-buffer-size: "8k"
```
### SSL ciphers ### SSL ciphers
Specifies the [enabled ciphers](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers). Specifies the [enabled ciphers](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers).
@ -579,4 +597,3 @@ To use the module in the Kubernetes Nginx ingress controller, you have two optio
- Use an InfluxDB server configured to enable the [UDP protocol](https://docs.influxdata.com/influxdb/v1.5/supported_protocols/udp/). - Use an InfluxDB server configured to enable the [UDP protocol](https://docs.influxdata.com/influxdb/v1.5/supported_protocols/udp/).
- Deploy Telegraf as a sidecar proxy to the Ingress controller configured to listen on UDP with the [socket listener input](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/inputs/socket_listener) and to write using - Deploy Telegraf as a sidecar proxy to the Ingress controller configured to listen on UDP with the [socket listener input](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/inputs/socket_listener) and to write using
any one of the [outputs plugins](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/outputs) any one of the [outputs plugins](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/outputs)

View file

@ -44,10 +44,6 @@ The following table shows a configuration option's name, type, and the default v
|[disable-ipv6-dns](#disable-ipv6-dns)|bool|false| |[disable-ipv6-dns](#disable-ipv6-dns)|bool|false|
|[enable-underscores-in-headers](#enable-underscores-in-headers)|bool|false| |[enable-underscores-in-headers](#enable-underscores-in-headers)|bool|false|
|[ignore-invalid-headers](#ignore-invalid-headers)|bool|true| |[ignore-invalid-headers](#ignore-invalid-headers)|bool|true|
|[enable-vts-status](#enable-vts-status)|bool|false|
|[vts-status-zone-size](#vts-status-zone-size)|string|"10m"|
|[vts-sum-key](#vts-sum-key)|string|"*"|
|[vts-default-filter-key](#vts-default-filter-key)|string|"$geoip_country_code country::*"|
|[retry-non-idempotent](#retry-non-idempotent)|bool|"false"| |[retry-non-idempotent](#retry-non-idempotent)|bool|"false"|
|[error-log-level](#error-log-level)|string|"notice"| |[error-log-level](#error-log-level)|string|"notice"|
|[http2-max-field-size](#http2-max-field-size)|string|"4k"| |[http2-max-field-size](#http2-max-field-size)|string|"4k"|
@ -240,32 +236,6 @@ Enables underscores in header names. _**default:**_ is disabled
Set if header fields with invalid names should be ignored. Set if header fields with invalid names should be ignored.
_**default:**_ is enabled _**default:**_ is enabled
## enable-vts-status
Allows the replacement of the default status page with a third party module named [nginx-module-vts](https://github.com/vozlt/nginx-module-vts).
_**default:**_ is disabled
## vts-status-zone-size
Vts config on http level sets parameters for a shared memory zone that will keep states for various keys. The cache is shared between all worker processes. _**default:**_ 10m
_References:_
[https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone)
## vts-default-filter-key
Vts config on http level enables the keys by user defined variable. The key is a key string to calculate traffic. The name is a group string to calculate traffic. The key and name can contain variables such as $host, $server_name. The name's group belongs to filterZones if specified. The key's group belongs to serverZones if not specified second argument name. _**default:**_ $geoip_country_code country::*
_References:_
[https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key)
## vts-sum-key
For metrics keyed (or when using Prometheus, labeled) by server zone, this value is used to indicate metrics for all server zones combined. _**default:**_ *
_References:_
[https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_display_sum_key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_display_sum_key)
## retry-non-idempotent ## retry-non-idempotent
Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value "true". Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value "true".
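In ConfigMap form this amounts to a single key. A minimal sketch, with illustrative metadata:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration   # name is illustrative
  namespace: ingress-nginx
data:
  retry-non-idempotent: "true"
```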

View file

@ -2,10 +2,3 @@
The [ngx_http_stub_status_module](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) module provides access to basic status information. The [ngx_http_stub_status_module](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) module provides access to basic status information.
This is the default module, active at the URL `/nginx_status` on the status port (default is 18080). This is the default module, active at the URL `/nginx_status` on the status port (default is 18080).
This controller provides an alternative to this module using the [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) module.
To use this module just set in the configuration configmap `enable-vts-status: "true"`.
![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter")
To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`

View file

@ -3,7 +3,7 @@ all: all-container
BUILDTAGS= BUILDTAGS=
# Use the 0.0 tag for testing, it shouldn't clobber any release builds # Use the 0.0 tag for testing, it shouldn't clobber any release builds
TAG?=0.1 TAG?=0.3
REGISTRY?=quay.io/kubernetes-ingress-controller REGISTRY?=quay.io/kubernetes-ingress-controller
GOOS?=linux GOOS?=linux
DOCKER?=docker DOCKER?=docker
@ -26,11 +26,11 @@ ARCH ?= $(shell go env GOARCH)
GOARCH = ${ARCH} GOARCH = ${ARCH}
DUMB_ARCH = ${ARCH} DUMB_ARCH = ${ARCH}
BASEIMAGE?=alpine:3.6 BASEIMAGE?=alpine:3.7
ALL_ARCH = amd64 arm arm64 ppc64le ALL_ARCH = amd64 arm arm64 ppc64le
QEMUVERSION=v2.9.1 QEMUVERSION=v2.12.0
IMGNAME = custom-error-pages IMGNAME = custom-error-pages
IMAGE = $(REGISTRY)/$(IMGNAME) IMAGE = $(REGISTRY)/$(IMGNAME)
@ -74,7 +74,7 @@ ifeq ($(ARCH),amd64)
else else
# When cross-building, only the placeholder "CROSS_BUILD_" should be removed # When cross-building, only the placeholder "CROSS_BUILD_" should be removed
# Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel
$(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset # $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset
curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs
$(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE) $(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE)
endif endif
@ -103,3 +103,8 @@ build: clean
release: all-container all-push release: all-container all-push
echo "done" echo "done"
.PHONY: register-qemu
register-qemu:
# Register /usr/bin/qemu-ARCH-static as the handler for binaries in multiple platforms
$(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset

View file

@ -1,2 +1,3 @@
# custom-error-pages
Example of Custom error pages for the NGINX Ingress controller Example of Custom error pages for the NGINX Ingress controller

View file

@ -39,15 +39,34 @@ const (
// ContentType name of the header that defines the format of the reply // ContentType name of the header that defines the format of the reply
ContentType = "Content-Type" ContentType = "Content-Type"
// OriginalURI name of the header with the original URL from NGINX
OriginalURI = "X-Original-URI"
// Namespace name of the header that contains information about the Ingress namespace
Namespace = "X-Namespace"
// IngressName name of the header that contains the matched Ingress
IngressName = "X-Ingress-Name"
// ServiceName name of the header that contains the matched Service in the Ingress
ServiceName = "X-Service-Name"
// ServicePort name of the header that contains the matched Service port in the Ingress
ServicePort = "X-Service-Port"
// ErrFilesPathVar is the name of the environment variable indicating
// the location on disk of files served by the handler.
ErrFilesPathVar = "ERROR_FILES_PATH"
) )
func main() { func main() {
path := "/www" errFilesPath := "/www"
if os.Getenv("PATH") != "" { if os.Getenv(ErrFilesPathVar) != "" {
path = os.Getenv("PATH") errFilesPath = os.Getenv(ErrFilesPathVar)
} }
http.HandleFunc("/", errorHandler(path)) http.HandleFunc("/", errorHandler(errFilesPath))
http.Handle("/metrics", promhttp.Handler()) http.Handle("/metrics", promhttp.Handler())
@ -63,18 +82,28 @@ func errorHandler(path string) func(http.ResponseWriter, *http.Request) {
start := time.Now() start := time.Now()
ext := "html" ext := "html"
if os.Getenv("DEBUG") != "" {
w.Header().Set(FormatHeader, r.Header.Get(FormatHeader))
w.Header().Set(CodeHeader, r.Header.Get(CodeHeader))
w.Header().Set(ContentType, r.Header.Get(ContentType))
w.Header().Set(OriginalURI, r.Header.Get(OriginalURI))
w.Header().Set(Namespace, r.Header.Get(Namespace))
w.Header().Set(IngressName, r.Header.Get(IngressName))
w.Header().Set(ServiceName, r.Header.Get(ServiceName))
w.Header().Set(ServicePort, r.Header.Get(ServicePort))
}
format := r.Header.Get(FormatHeader) format := r.Header.Get(FormatHeader)
if format == "" { if format == "" {
format = "text/html" format = "text/html"
log.Printf("forma not specified. Using %v\n", format) log.Printf("format not specified. Using %v", format)
} }
mediaType, _, _ := mime.ParseMediaType(format) cext, err := mime.ExtensionsByType(format)
cext, err := mime.ExtensionsByType(mediaType)
if err != nil { if err != nil {
log.Printf("unexpected error reading media type extension: %v. Using %v\n", err, ext) log.Printf("unexpected error reading media type extension: %v. Using %v", err, ext)
} else if len(cext) == 0 { } else if len(cext) == 0 {
log.Printf("couldn't get media type extension. Using %v\n", ext) log.Printf("couldn't get media type extension. Using %v", ext)
} else { } else {
ext = cext[0] ext = cext[0]
} }
@ -84,7 +113,7 @@ func errorHandler(path string) func(http.ResponseWriter, *http.Request) {
code, err := strconv.Atoi(errCode) code, err := strconv.Atoi(errCode)
if err != nil { if err != nil {
code = 404 code = 404
log.Printf("unexpected error reading return code: %v. Using %v\n", err, code) log.Printf("unexpected error reading return code: %v. Using %v", err, code)
} }
w.WriteHeader(code) w.WriteHeader(code)
@ -94,22 +123,22 @@ func errorHandler(path string) func(http.ResponseWriter, *http.Request) {
file := fmt.Sprintf("%v/%v%v", path, code, ext) file := fmt.Sprintf("%v/%v%v", path, code, ext)
f, err := os.Open(file) f, err := os.Open(file)
if err != nil { if err != nil {
log.Printf("unexpected error opening file: %v\n", err) log.Printf("unexpected error opening file: %v", err)
scode := strconv.Itoa(code) scode := strconv.Itoa(code)
file := fmt.Sprintf("%v/%cxx%v", path, scode[0], ext) file := fmt.Sprintf("%v/%cxx%v", path, scode[0], ext)
f, err := os.Open(file) f, err := os.Open(file)
if err != nil { if err != nil {
log.Printf("unexpected error opening file: %v\n", err) log.Printf("unexpected error opening file: %v", err)
http.NotFound(w, r) http.NotFound(w, r)
return return
} }
defer f.Close() defer f.Close()
log.Printf("serving custom error response for code %v and format %v from file %v\n", code, format, file) log.Printf("serving custom error response for code %v and format %v from file %v", code, format, file)
io.Copy(w, f) io.Copy(w, f)
return return
} }
defer f.Close() defer f.Close()
log.Printf("serving custom error response for code %v and format %v from file %v\n", code, format, file) log.Printf("serving custom error response for code %v and format %v from file %v", code, format, file)
io.Copy(w, f) io.Copy(w, f)
duration := time.Now().Sub(start).Seconds() duration := time.Now().Sub(start).Seconds()

File diff suppressed because it is too large

View file

@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
# 0.0.0 shouldn't clobber any released builds # 0.0.0 shouldn't clobber any released builds
TAG ?= 0.50 TAG ?= 0.52
REGISTRY ?= quay.io/kubernetes-ingress-controller REGISTRY ?= quay.io/kubernetes-ingress-controller
ARCH ?= $(shell go env GOARCH) ARCH ?= $(shell go env GOARCH)
DOCKER ?= docker DOCKER ?= docker

View file

@ -6,7 +6,6 @@ nginx [engine x] is an HTTP and reverse proxy server, a mail proxy server, and a
This custom nginx image contains: This custom nginx image contains:
- [stream](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) tcp support for upstreams - [stream](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) tcp support for upstreams
- nginx stats [nginx-module-vts](https://github.com/vozlt/nginx-module-vts)
- [Dynamic TLS record sizing](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/) - [Dynamic TLS record sizing](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/)
- [ngx_devel_kit](https://github.com/simpl/ngx_devel_kit) - [ngx_devel_kit](https://github.com/simpl/ngx_devel_kit)
- [set-misc-nginx-module](https://github.com/openresty/set-misc-nginx-module) - [set-misc-nginx-module](https://github.com/openresty/set-misc-nginx-module)

View file

@ -20,8 +20,7 @@ set -o nounset
set -o pipefail set -o pipefail
export NGINX_VERSION=1.13.12 export NGINX_VERSION=1.13.12
export NDK_VERSION=0.3.0 export NDK_VERSION=0.3.1rc1
export VTS_VERSION=0.1.16
export SETMISC_VERSION=0.31 export SETMISC_VERSION=0.31
export STICKY_SESSIONS_VERSION=08a395c66e42 export STICKY_SESSIONS_VERSION=08a395c66e42
export MORE_HEADERS_VERSION=0.33 export MORE_HEADERS_VERSION=0.33
@ -35,7 +34,8 @@ export MODSECURITY_VERSION=1.0.0
export LUA_NGX_VERSION=0.10.13 export LUA_NGX_VERSION=0.10.13
export LUA_UPSTREAM_VERSION=0.07 export LUA_UPSTREAM_VERSION=0.07
export COOKIE_FLAG_VERSION=1.1.0 export COOKIE_FLAG_VERSION=1.1.0
export NGINX_INFLUXDB_VERSION=f8732268d44aea706ecf8d9c6036e9b6dacc99b2 export NGINX_INFLUXDB_VERSION=f20cfb2458c338f162132f5a21eb021e2cbe6383
export GEOIP2_VERSION=2.0
export BUILD_PATH=/tmp/build export BUILD_PATH=/tmp/build
@ -88,21 +88,22 @@ clean-install \
lua-cjson \ lua-cjson \
python \ python \
luarocks \ luarocks \
libmaxminddb-dev \
|| exit 1 || exit 1
if [[ ${ARCH} == "x86_64" ]]; then if [[ ${ARCH} == "x86_64" ]]; then
ln -s /usr/lib/x86_64-linux-gnu/liblua5.1.so /usr/lib/liblua.so ln -s /usr/lib/x86_64-linux-gnu/liblua5.1.so /usr/lib/liblua.so
ln -s /usr/lib/x86_64-linux-gnu /usr/lib/lua-platform-path ln -s /usr/lib/x86_64-linux-gnu /usr/lib/lua-platform-path
fi fi
if [[ ${ARCH} == "armv7l" ]]; then if [[ ${ARCH} == "armv7l" ]]; then
ln -s /usr/lib/arm-linux-gnueabihf/liblua5.1.so /usr/lib/liblua.so ln -s /usr/lib/arm-linux-gnueabihf/liblua5.1.so /usr/lib/liblua.so
ln -s /usr/lib/arm-linux-gnueabihf /usr/lib/lua-platform-path ln -s /usr/lib/arm-linux-gnueabihf /usr/lib/lua-platform-path
fi fi
if [[ ${ARCH} == "aarch64" ]]; then if [[ ${ARCH} == "aarch64" ]]; then
ln -s /usr/lib/aarch64-linux-gnu/liblua5.1.so /usr/lib/liblua.so ln -s /usr/lib/aarch64-linux-gnu/liblua5.1.so /usr/lib/liblua.so
ln -s /usr/lib/aarch64-linux-gnu /usr/lib/lua-platform-path ln -s /usr/lib/aarch64-linux-gnu /usr/lib/lua-platform-path
fi fi
if [[ ${ARCH} == "ppc64le" ]]; then if [[ ${ARCH} == "ppc64le" ]]; then
@ -130,6 +131,8 @@ function geoip_get {
geoip_get "GeoIP.dat.gz" "https://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz" geoip_get "GeoIP.dat.gz" "https://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz"
geoip_get "GeoLiteCity.dat.gz" "https://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz" geoip_get "GeoLiteCity.dat.gz" "https://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz"
geoip_get "GeoIPASNum.dat.gz" "http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz" geoip_get "GeoIPASNum.dat.gz" "http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz"
geoip_get "GeoLite2-City.mmdb.gz" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz"
geoip_get "GeoLite2-ASN.mmdb.gz" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz"
mkdir --verbose -p "$BUILD_PATH" mkdir --verbose -p "$BUILD_PATH"
cd "$BUILD_PATH" cd "$BUILD_PATH"
@ -138,15 +141,12 @@ cd "$BUILD_PATH"
get_src fb92f5602cdb8d3ab1ad47dbeca151b185d62eedb67d347bbe9d79c1438c85de \ get_src fb92f5602cdb8d3ab1ad47dbeca151b185d62eedb67d347bbe9d79c1438c85de \
"http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz"
get_src 88e05a99a8a7419066f5ae75966fb1efc409bad4522d14986da074554ae61619 \ get_src 49f50d4cd62b166bc1aaf712febec5e028d9f187cedbc27a610dfd01bdde2d36 \
"https://github.com/simpl/ngx_devel_kit/archive/v$NDK_VERSION.tar.gz" "https://github.com/simpl/ngx_devel_kit/archive/v$NDK_VERSION.tar.gz"
get_src 97946a68937b50ab8637e1a90a13198fe376d801dc3e7447052e43c28e9ee7de \ get_src 97946a68937b50ab8637e1a90a13198fe376d801dc3e7447052e43c28e9ee7de \
"https://github.com/openresty/set-misc-nginx-module/archive/v$SETMISC_VERSION.tar.gz" "https://github.com/openresty/set-misc-nginx-module/archive/v$SETMISC_VERSION.tar.gz"
get_src c668d0ed38afbba12f0224cb8cf5d70dcb9388723766dfb40d00539f887186fa \
"https://github.com/vozlt/nginx-module-vts/archive/v$VTS_VERSION.tar.gz"
get_src a3dcbab117a9c103bc1ea5200fc00a7b7d2af97ff7fd525f16f8ac2632e30fbf \ get_src a3dcbab117a9c103bc1ea5200fc00a7b7d2af97ff7fd525f16f8ac2632e30fbf \
"https://github.com/openresty/headers-more-nginx-module/archive/v$MORE_HEADERS_VERSION.tar.gz" "https://github.com/openresty/headers-more-nginx-module/archive/v$MORE_HEADERS_VERSION.tar.gz"
@ -213,9 +213,12 @@ get_src d81b33129c6fb5203b571fa4d8394823bf473d8872c0357a1d0f14420b1483bd \
get_src 76d8638a350a0484b3d6658e329ba38bb831d407eaa6dce2a084a27a22063133 \ get_src 76d8638a350a0484b3d6658e329ba38bb831d407eaa6dce2a084a27a22063133 \
"https://github.com/openresty/luajit2/archive/v2.1-20180420.tar.gz" "https://github.com/openresty/luajit2/archive/v2.1-20180420.tar.gz"
get_src e41589bd88953276c16c4817ab9b4faba1aca21d9bb70a8c1714505176c16ae4 \ get_src 1897d7677d99c1cedeb95b2eb00652a4a7e8e604304c3053a93bd3ba7dd82884 \
"https://github.com/influxdata/nginx-influxdb-module/archive/$NGINX_INFLUXDB_VERSION.tar.gz" "https://github.com/influxdata/nginx-influxdb-module/archive/$NGINX_INFLUXDB_VERSION.tar.gz"
get_src ebb4652c4f9a2e1ee31fddefc4c93ff78e651a4b2727d3453d026bccbd708d99 \
"https://github.com/leev/ngx_http_geoip2_module/archive/${GEOIP2_VERSION}.tar.gz"
# improve compilation times # improve compilation times
CORES=$(($(grep -c ^processor /proc/cpuinfo) - 0)) CORES=$(($(grep -c ^processor /proc/cpuinfo) - 0))
@ -373,6 +376,7 @@ Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-999-EXCLUSION-RULES-AFTE
cd "$BUILD_PATH/nginx-$NGINX_VERSION" cd "$BUILD_PATH/nginx-$NGINX_VERSION"
WITH_FLAGS="--with-debug \ WITH_FLAGS="--with-debug \
--with-compat \
--with-pcre-jit \ --with-pcre-jit \
--with-http_ssl_module \ --with-http_ssl_module \
--with-http_stub_status_module \ --with-http_stub_status_module \
@ -405,7 +409,6 @@ fi
WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \ WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \
--add-module=$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION \ --add-module=$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION \
--add-module=$BUILD_PATH/nginx-module-vts-$VTS_VERSION \
--add-module=$BUILD_PATH/headers-more-nginx-module-$MORE_HEADERS_VERSION \ --add-module=$BUILD_PATH/headers-more-nginx-module-$MORE_HEADERS_VERSION \
--add-module=$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION \ --add-module=$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION \
--add-module=$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH \ --add-module=$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH \
@ -418,6 +421,7 @@ WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \
--add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/jaeger \ --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/jaeger \
--add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/zipkin \ --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/zipkin \
--add-dynamic-module=$BUILD_PATH/ModSecurity-nginx-$MODSECURITY_VERSION \ --add-dynamic-module=$BUILD_PATH/ModSecurity-nginx-$MODSECURITY_VERSION \
--add-dynamic-module=$BUILD_PATH/ngx_http_geoip2_module-${GEOIP2_VERSION} \
--add-module=$BUILD_PATH/ngx_brotli" --add-module=$BUILD_PATH/ngx_brotli"
./configure \ ./configure \

View file

@ -25,6 +25,12 @@ import (
"k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/kubernetes/pkg/util/filesystem"
) )
// ReadWriteByUser defines linux permission to read and write files for the owner user
const ReadWriteByUser = 0660
// ReadByUserGroup defines linux permission to read files by the user and group owner/s
const ReadByUserGroup = 0640
// Filesystem is an interface that we can use to mock various filesystem operations // Filesystem is an interface that we can use to mock various filesystem operations
type Filesystem interface { type Filesystem interface {
filesystem.Filesystem filesystem.Filesystem
@ -35,7 +41,7 @@ func NewLocalFS() (Filesystem, error) {
fs := filesystem.DefaultFs{} fs := filesystem.DefaultFs{}
for _, directory := range directories { for _, directory := range directories {
err := fs.MkdirAll(directory, 0655) err := fs.MkdirAll(directory, ReadWriteByUser)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -97,12 +103,5 @@ func NewFakeFS() (Filesystem, error) {
} }
} }
fakeFs.MkdirAll("/run", 0655)
fakeFs.MkdirAll("/proc", 0655)
fakeFs.MkdirAll("/etc/nginx/template", 0655)
fakeFs.MkdirAll(DefaultSSLDirectory, 0655)
fakeFs.MkdirAll(AuthDirectory, 0655)
return fakeFs, nil return fakeFs, nil
} }

View file

@ -54,7 +54,6 @@ import (
"k8s.io/ingress-nginx/internal/ingress/annotations/sslpassthrough" "k8s.io/ingress-nginx/internal/ingress/annotations/sslpassthrough"
"k8s.io/ingress-nginx/internal/ingress/annotations/upstreamhashby" "k8s.io/ingress-nginx/internal/ingress/annotations/upstreamhashby"
"k8s.io/ingress-nginx/internal/ingress/annotations/upstreamvhost" "k8s.io/ingress-nginx/internal/ingress/annotations/upstreamvhost"
"k8s.io/ingress-nginx/internal/ingress/annotations/vtsfilterkey"
"k8s.io/ingress-nginx/internal/ingress/annotations/xforwardedprefix" "k8s.io/ingress-nginx/internal/ingress/annotations/xforwardedprefix"
"k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/errors"
"k8s.io/ingress-nginx/internal/ingress/resolver" "k8s.io/ingress-nginx/internal/ingress/resolver"
@ -90,7 +89,6 @@ type Ingress struct {
UpstreamHashBy string UpstreamHashBy string
LoadBalancing string LoadBalancing string
UpstreamVhost string UpstreamVhost string
VtsFilterKey string
Whitelist ipwhitelist.SourceRange Whitelist ipwhitelist.SourceRange
XForwardedPrefix bool XForwardedPrefix bool
SSLCiphers string SSLCiphers string
@ -132,7 +130,6 @@ func NewAnnotationExtractor(cfg resolver.Resolver) Extractor {
"UpstreamHashBy": upstreamhashby.NewParser(cfg), "UpstreamHashBy": upstreamhashby.NewParser(cfg),
"LoadBalancing": loadbalancing.NewParser(cfg), "LoadBalancing": loadbalancing.NewParser(cfg),
"UpstreamVhost": upstreamvhost.NewParser(cfg), "UpstreamVhost": upstreamvhost.NewParser(cfg),
"VtsFilterKey": vtsfilterkey.NewParser(cfg),
"Whitelist": ipwhitelist.NewParser(cfg), "Whitelist": ipwhitelist.NewParser(cfg),
"XForwardedPrefix": xforwardedprefix.NewParser(cfg), "XForwardedPrefix": xforwardedprefix.NewParser(cfg),
"SSLCiphers": sslcipher.NewParser(cfg), "SSLCiphers": sslcipher.NewParser(cfg),

View file

@ -19,8 +19,6 @@ package auth
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os"
"path"
"regexp" "regexp"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -86,17 +84,6 @@ type auth struct {
// NewParser creates a new authentication annotation parser // NewParser creates a new authentication annotation parser
func NewParser(authDirectory string, r resolver.Resolver) parser.IngressAnnotation { func NewParser(authDirectory string, r resolver.Resolver) parser.IngressAnnotation {
os.MkdirAll(authDirectory, 0755)
currPath := authDirectory
for currPath != "/" {
currPath = path.Dir(currPath)
err := os.Chmod(currPath, 0755)
if err != nil {
break
}
}
return auth{r, authDirectory} return auth{r, authDirectory}
} }
@ -157,8 +144,7 @@ func dumpSecret(filename string, secret *api.Secret) error {
} }
} }
// TODO: check permissions required err := ioutil.WriteFile(filename, val, file.ReadWriteByUser)
err := ioutil.WriteFile(filename, val, 0777)
if err != nil { if err != nil {
return ing_errors.LocationDenied{ return ing_errors.LocationDenied{
Reason: errors.Wrap(err, "unexpected error creating password file"), Reason: errors.Wrap(err, "unexpected error creating password file"),

View file

@ -1,40 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vtsfilterkey
import (
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/ingress-nginx/internal/ingress/annotations/parser"
"k8s.io/ingress-nginx/internal/ingress/resolver"
)
type vtsFilterKey struct {
r resolver.Resolver
}
// NewParser creates a new vts filter key annotation parser
func NewParser(r resolver.Resolver) parser.IngressAnnotation {
return vtsFilterKey{r}
}
// Parse parses the annotations contained in the ingress rule
// used to indicate if the location/s contains a fragment of
// configuration to be included inside the paths of the rules
func (a vtsFilterKey) Parse(ing *extensions.Ingress) (interface{}, error) {
return parser.GetStringAnnotation("vts-filter-key", ing)
}

View file

@ -26,6 +26,8 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
const nginxPID = "/tmp/nginx.pid"
// Name returns the healthcheck name // Name returns the healthcheck name
func (n NGINXController) Name() string { func (n NGINXController) Name() string {
return "nginx-ingress-controller" return "nginx-ingress-controller"
@ -58,13 +60,13 @@ func (n *NGINXController) Check(_ *http.Request) error {
if err != nil { if err != nil {
return errors.Wrap(err, "unexpected error reading /proc directory") return errors.Wrap(err, "unexpected error reading /proc directory")
} }
f, err := n.fileSystem.ReadFile("/run/nginx.pid") f, err := n.fileSystem.ReadFile(nginxPID)
if err != nil { if err != nil {
return errors.Wrap(err, "unexpected error reading /run/nginx.pid") return errors.Wrapf(err, "unexpected error reading %v", nginxPID)
} }
pid, err := strconv.Atoi(strings.TrimRight(string(f), "\r\n")) pid, err := strconv.Atoi(strings.TrimRight(string(f), "\r\n"))
if err != nil { if err != nil {
return errors.Wrap(err, "unexpected error reading the PID from /run/nginx.pid") return errors.Wrapf(err, "unexpected error reading the nginx PID from %v", nginxPID)
} }
_, err = fs.NewProc(pid) _, err = fs.NewProc(pid)

View file

@ -27,6 +27,7 @@ import (
"k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/healthz"
"k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/kubernetes/pkg/util/filesystem"
"k8s.io/ingress-nginx/internal/file"
ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config"
) )
@ -60,8 +61,8 @@ func TestNginxCheck(t *testing.T) {
}) })
// create pid file // create pid file
fs.MkdirAll("/run", 0655) fs.MkdirAll("/tmp", file.ReadWriteByUser)
pidFile, err := fs.Create("/run/nginx.pid") pidFile, err := fs.Create(nginxPID)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }

View file

@ -161,31 +161,6 @@ type Configuration struct {
// By default this is enabled // By default this is enabled
IgnoreInvalidHeaders bool `json:"ignore-invalid-headers"` IgnoreInvalidHeaders bool `json:"ignore-invalid-headers"`
// EnableVtsStatus allows the replacement of the default status page with a third party module named
// nginx-module-vts - https://github.com/vozlt/nginx-module-vts
// By default this is disabled
EnableVtsStatus bool `json:"enable-vts-status,omitempty"`
// Vts config on http level
// Description: Sets parameters for a shared memory zone that will keep states for various keys. The cache is shared between all worker processe
// https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone
// Default value is 10m
VtsStatusZoneSize string `json:"vts-status-zone-size,omitempty"`
// Vts config on http level
// Description: Enables the keys by user defined variable. The key is a key string to calculate traffic.
// The name is a group string to calculate traffic. The key and name can contain variables such as $host,
// $server_name. The name's group belongs to filterZones if specified. The key's group belongs to serverZones
// if not specified second argument name. The example with geoip module is as follows:
// https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key
// Default value is $geoip_country_code country::*
VtsDefaultFilterKey string `json:"vts-default-filter-key,omitempty"`
// Description: Sets sum key used by vts json output, and the sum label in prometheus output.
// These indicate metrics values for all server zones combined, rather than for a specific one.
// Default value is *
VtsSumKey string `json:"vts-sum-key,omitempty"`
// RetryNonIdempotent since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) // RetryNonIdempotent since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH)
// in case of an error. The previous behavior can be restored using the value true // in case of an error. The previous behavior can be restored using the value true
RetryNonIdempotent bool `json:"retry-non-idempotent"` RetryNonIdempotent bool `json:"retry-non-idempotent"`
@ -531,6 +506,9 @@ type Configuration struct {
// http://github.com/influxdata/nginx-influxdb-module/ // http://github.com/influxdata/nginx-influxdb-module/
// By default this is disabled // By default this is disabled
EnableInfluxDB bool `json:"enable-influxdb"` EnableInfluxDB bool `json:"enable-influxdb"`
// Checksum contains a checksum of the configmap configuration
Checksum string `json:"-"`
} }
// NewDefault returns the default nginx configuration // NewDefault returns the default nginx configuration
@ -603,9 +581,6 @@ func NewDefault() Configuration {
WorkerProcesses: strconv.Itoa(runtime.NumCPU()), WorkerProcesses: strconv.Itoa(runtime.NumCPU()),
WorkerShutdownTimeout: "10s", WorkerShutdownTimeout: "10s",
LoadBalanceAlgorithm: defaultLoadBalancerAlgorithm, LoadBalanceAlgorithm: defaultLoadBalancerAlgorithm,
VtsStatusZoneSize: "10m",
VtsDefaultFilterKey: "$geoip_country_code country::*",
VtsSumKey: "*",
VariablesHashBucketSize: 128, VariablesHashBucketSize: 128,
VariablesHashMaxSize: 2048, VariablesHashMaxSize: 2048,
UseHTTP2: true, UseHTTP2: true,

View file

@ -22,7 +22,6 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"sync/atomic"
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
@ -61,15 +60,15 @@ type Configuration struct {
ForceNamespaceIsolation bool ForceNamespaceIsolation bool
// optional // +optional
TCPConfigMapName string TCPConfigMapName string
// optional // +optional
UDPConfigMapName string UDPConfigMapName string
DefaultHealthzURL string DefaultHealthzURL string
DefaultSSLCertificate string DefaultSSLCertificate string
// optional // +optional
PublishService string PublishService string
PublishStatusAddress string PublishStatusAddress string
@ -98,7 +97,7 @@ type Configuration struct {
DisableLua bool DisableLua bool
} }
// GetPublishService returns the configured service used to set ingress status // GetPublishService returns the Service used to set the load-balancer status of Ingresses.
func (n NGINXController) GetPublishService() *apiv1.Service { func (n NGINXController) GetPublishService() *apiv1.Service {
s, err := n.store.GetService(n.cfg.PublishService) s, err := n.store.GetService(n.cfg.PublishService)
if err != nil { if err != nil {
@ -108,9 +107,9 @@ func (n NGINXController) GetPublishService() *apiv1.Service {
return s return s
} }
// sync collects all the pieces required to assemble the configuration file and // syncIngress collects all the pieces required to assemble the NGINX
// then sends the content to the backend (OnUpdate) receiving the populated // configuration file and passes the resulting data structures to the backend
// template as response reloading the backend if is required. // (OnUpdate) when a reload is deemed necessary.
func (n *NGINXController) syncIngress(interface{}) error { func (n *NGINXController) syncIngress(interface{}) error {
n.syncRateLimiter.Accept() n.syncRateLimiter.Accept()
@ -118,7 +117,7 @@ func (n *NGINXController) syncIngress(interface{}) error {
return nil return nil
} }
// Sort ingress rules using the ResourceVersion field // sort Ingresses using the ResourceVersion field
ings := n.store.ListIngresses() ings := n.store.ListIngresses()
sort.SliceStable(ings, func(i, j int) bool { sort.SliceStable(ings, func(i, j int) bool {
ir := ings[i].ResourceVersion ir := ings[i].ResourceVersion
@ -136,7 +135,7 @@ func (n *NGINXController) syncIngress(interface{}) error {
for _, loc := range server.Locations { for _, loc := range server.Locations {
if loc.Path != rootLocation { if loc.Path != rootLocation {
glog.Warningf("ignoring path %v of ssl passthrough host %v", loc.Path, server.Hostname) glog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname)
continue continue
} }
passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{ passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{
@ -155,27 +154,29 @@ func (n *NGINXController) syncIngress(interface{}) error {
TCPEndpoints: n.getStreamServices(n.cfg.TCPConfigMapName, apiv1.ProtocolTCP), TCPEndpoints: n.getStreamServices(n.cfg.TCPConfigMapName, apiv1.ProtocolTCP),
UDPEndpoints: n.getStreamServices(n.cfg.UDPConfigMapName, apiv1.ProtocolUDP), UDPEndpoints: n.getStreamServices(n.cfg.UDPConfigMapName, apiv1.ProtocolUDP),
PassthroughBackends: passUpstreams, PassthroughBackends: passUpstreams,
ConfigurationChecksum: n.store.GetBackendConfiguration().Checksum,
} }
if !n.isForceReload() && n.runningConfig.Equal(&pcfg) { if n.runningConfig.Equal(&pcfg) {
glog.V(3).Infof("skipping backend reload (no changes detected)") glog.V(3).Infof("No configuration change detected, skipping backend reload.")
return nil return nil
} }
if n.cfg.DynamicConfigurationEnabled && n.IsDynamicConfigurationEnough(&pcfg) && !n.isForceReload() { if n.cfg.DynamicConfigurationEnabled && n.IsDynamicConfigurationEnough(&pcfg) {
glog.Infof("skipping reload") glog.Infof("Changes handled by the dynamic configuration, skipping backend reload.")
} else { } else {
glog.Infof("backend reload required") glog.Infof("Configuration changes detected, backend reload required.")
err := n.OnUpdate(pcfg) err := n.OnUpdate(pcfg)
if err != nil { if err != nil {
IncReloadErrorCount() IncReloadErrorCount()
ConfigSuccess(false) ConfigSuccess(false)
glog.Errorf("unexpected failure restarting the backend: \n%v", err) glog.Errorf("Unexpected failure reloading the backend:\n%v", err)
return err return err
} }
glog.Infof("ingress backend successfully reloaded...") glog.Infof("Backend successfully reloaded.")
ConfigSuccess(true) ConfigSuccess(true)
IncReloadCount() IncReloadCount()
setSSLExpireTime(servers) setSSLExpireTime(servers)
@ -185,49 +186,45 @@ func (n *NGINXController) syncIngress(interface{}) error {
isFirstSync := n.runningConfig.Equal(&ingress.Configuration{}) isFirstSync := n.runningConfig.Equal(&ingress.Configuration{})
go func(isFirstSync bool) { go func(isFirstSync bool) {
if isFirstSync { if isFirstSync {
glog.Infof("first sync of Nginx configuration") glog.Infof("Initial synchronization of the NGINX configuration.")
// it takes time for Nginx to start listening on the port // it takes time for NGINX to start listening on the configured ports
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
} }
err := configureDynamically(&pcfg, n.cfg.ListenPorts.Status) err := configureDynamically(&pcfg, n.cfg.ListenPorts.Status)
if err == nil { if err == nil {
glog.Infof("dynamic reconfiguration succeeded") glog.Infof("Dynamic reconfiguration succeeded.")
} else { } else {
glog.Warningf("could not dynamically reconfigure: %v", err) glog.Warningf("Dynamic reconfiguration failed: %v", err)
} }
}(isFirstSync) }(isFirstSync)
} }
n.runningConfig = &pcfg n.runningConfig = &pcfg
n.SetForceReload(false)
return nil return nil
} }
func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Protocol) []ingress.L4Service { func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Protocol) []ingress.L4Service {
glog.V(3).Infof("obtaining information about stream services of type %v located in configmap %v", proto, configmapName) glog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName)
if configmapName == "" { if configmapName == "" {
// no configmap configured
return []ingress.L4Service{} return []ingress.L4Service{}
} }
_, _, err := k8s.ParseNameNS(configmapName) _, _, err := k8s.ParseNameNS(configmapName)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err) glog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err)
return []ingress.L4Service{} return []ingress.L4Service{}
} }
configmap, err := n.store.GetConfigMap(configmapName) configmap, err := n.store.GetConfigMap(configmapName)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err) glog.Errorf("Error reading ConfigMap %q: %v", configmapName, err)
return []ingress.L4Service{} return []ingress.L4Service{}
} }
var svcs []ingress.L4Service var svcs []ingress.L4Service
var svcProxyProtocol ingress.ProxyProtocol var svcProxyProtocol ingress.ProxyProtocol
// k -> port to expose
// v -> <namespace>/<service name>:<port from service to be used>
rp := []int{ rp := []int{
n.cfg.ListenPorts.HTTP, n.cfg.ListenPorts.HTTP,
@ -239,21 +236,22 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
} }
reserverdPorts := sets.NewInt(rp...) reserverdPorts := sets.NewInt(rp...)
for k, v := range configmap.Data { // svcRef format: <(str)namespace>/<(str)service>:<(intstr)port>[:<(bool)decode>:<(bool)encode>]
externalPort, err := strconv.Atoi(k) for port, svcRef := range configmap.Data {
externalPort, err := strconv.Atoi(port)
if err != nil { if err != nil {
glog.Warningf("%v is not valid as a TCP/UDP port", k) glog.Warningf("%q is not a valid %v port number", port, proto)
continue continue
} }
if reserverdPorts.Has(externalPort) { if reserverdPorts.Has(externalPort) {
glog.Warningf("port %v cannot be used for TCP or UDP services. It is reserved for the Ingress controller", k) glog.Warningf("Port %d cannot be used for %v stream services. It is reserved for the Ingress controller.", externalPort, proto)
continue continue
} }
nsSvcPort := strings.Split(v, ":") nsSvcPort := strings.Split(svcRef, ":")
if len(nsSvcPort) < 2 { if len(nsSvcPort) < 2 {
glog.Warningf("invalid format (namespace/name:port:[PROXY]:[PROXY]) '%v'", k) glog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort)
continue continue
} }
@ -262,7 +260,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
svcProxyProtocol.Decode = false svcProxyProtocol.Decode = false
svcProxyProtocol.Encode = false svcProxyProtocol.Encode = false
// Proxy protocol is possible if the service is TCP // Proxy Protocol is only compatible with TCP Services
if len(nsSvcPort) >= 3 && proto == apiv1.ProtocolTCP { if len(nsSvcPort) >= 3 && proto == apiv1.ProtocolTCP {
if len(nsSvcPort) >= 3 && strings.ToUpper(nsSvcPort[2]) == "PROXY" { if len(nsSvcPort) >= 3 && strings.ToUpper(nsSvcPort[2]) == "PROXY" {
svcProxyProtocol.Decode = true svcProxyProtocol.Decode = true
@ -280,14 +278,15 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
svc, err := n.store.GetService(nsName) svc, err := n.store.GetService(nsName)
if err != nil { if err != nil {
glog.Warningf("error getting service %v: %v", nsName, err) glog.Warningf("Error getting Service %q from local store: %v", nsName, err)
continue continue
} }
var endps []ingress.Endpoint var endps []ingress.Endpoint
targetPort, err := strconv.Atoi(svcPort) targetPort, err := strconv.Atoi(svcPort)
if err != nil { if err != nil {
glog.V(3).Infof("searching service %v endpoints using the name '%v'", svcNs, svcName, svcPort) // not a port number, fall back to using port name
glog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName)
for _, sp := range svc.Spec.Ports { for _, sp := range svc.Spec.Ports {
if sp.Name == svcPort { if sp.Name == svcPort {
if sp.Protocol == proto { if sp.Protocol == proto {
@ -297,8 +296,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
} }
} }
} else { } else {
// we need to use the TargetPort (where the endpoints are running) glog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName)
glog.V(3).Infof("searching service %v/%v endpoints using the target port '%v'", svcNs, svcName, targetPort)
for _, sp := range svc.Spec.Ports { for _, sp := range svc.Spec.Ports {
if sp.Port == int32(targetPort) { if sp.Port == int32(targetPort) {
if sp.Protocol == proto { if sp.Protocol == proto {
@ -309,10 +307,10 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
} }
} }
// stream services cannot contain empty upstreams and there is no // stream services cannot contain empty upstreams and there is
// default backend equivalent // no default backend equivalent
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("service %v/%v does not have any active endpoints for port %v and protocol %v", svcNs, svcName, svcPort, proto) glog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort)
continue continue
} }
@ -332,9 +330,8 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
return svcs return svcs
} }
// getDefaultUpstream returns an upstream associated with the // getDefaultUpstream returns the upstream associated with the default backend.
// default backend service. In case of error retrieving information // Configures the upstream to return HTTP code 503 in case of error.
// configure the upstream to return http code 503.
func (n *NGINXController) getDefaultUpstream() *ingress.Backend { func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
upstream := &ingress.Backend{ upstream := &ingress.Backend{
Name: defUpstreamName, Name: defUpstreamName,
@ -342,14 +339,14 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
svcKey := n.cfg.DefaultService svcKey := n.cfg.DefaultService
svc, err := n.store.GetService(svcKey) svc, err := n.store.GetService(svcKey)
if err != nil { if err != nil {
glog.Warningf("unexpected error searching the default backend %v: %v", n.cfg.DefaultService, err) glog.Warningf("Unexpected error getting default backend %q from local store: %v", n.cfg.DefaultService, err)
upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint()) upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint())
return upstream return upstream
} }
endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints) endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints)
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("service %v does not have any active endpoints", svcKey) glog.Warningf("Service %q does not have any active Endpoint", svcKey)
endps = []ingress.Endpoint{n.DefaultEndpoint()} endps = []ingress.Endpoint{n.DefaultEndpoint()}
} }
@ -358,8 +355,9 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
return upstream return upstream
} }
// getBackendServers returns a list of Upstream and Server to be used by the backend // getBackendServers returns a list of Upstream and Server to be used by the
// An upstream can be used in multiple servers if the namespace, service name and port are the same // backend. An upstream can be used in multiple servers if the namespace,
// service name and port are the same.
func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]*ingress.Backend, []*ingress.Server) { func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]*ingress.Backend, []*ingress.Server) {
du := n.getDefaultUpstream() du := n.getDefaultUpstream()
upstreams := n.createUpstreams(ingresses, du) upstreams := n.createUpstreams(ingresses, du)
@ -368,7 +366,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
for _, ing := range ingresses { for _, ing := range ingresses {
anns, err := n.store.GetIngressAnnotations(ing) anns, err := n.store.GetIngressAnnotations(ing)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err) glog.Errorf("Unexpected error reading annotations for Ingress %q from local store: %v", ing.Name, err)
} }
for _, rule := range ing.Spec.Rules { for _, rule := range ing.Spec.Rules {
@ -383,7 +381,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
if rule.HTTP == nil && if rule.HTTP == nil &&
host != defServerName { host != defServerName {
glog.V(3).Infof("ingress rule %v/%v does not contain HTTP rules, using default backend", ing.Namespace, ing.Name) glog.V(3).Infof("Ingress \"%v/%v\" does not contain any HTTP rule, using default backend.", ing.Namespace, ing.Name)
continue continue
} }
@ -393,23 +391,21 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
if server.CertificateAuth.CAFileName == "" { if server.CertificateAuth.CAFileName == "" {
server.CertificateAuth = anns.CertificateAuth server.CertificateAuth = anns.CertificateAuth
// It is possible that no CAFileName is found in the secret if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" {
if server.CertificateAuth.CAFileName == "" { glog.V(3).Infof("Secret %q does not contain 'ca.crt' key, mutual authentication disabled for Ingress \"%v/%v\"", server.CertificateAuth.Secret, ing.Namespace, ing.Name)
glog.V(3).Infof("secret %v does not contain 'ca.crt', mutual authentication not enabled - ingress rule %v/%v.", server.CertificateAuth.Secret, ing.Namespace, ing.Name)
} }
} else { } else {
glog.V(3).Infof("server %v already contains a mutual authentication configuration - ingress rule %v/%v", server.Hostname, ing.Namespace, ing.Name) glog.V(3).Infof("Server %v is already configured for mutual authentication (Ingress \"%v/%v\")", server.Hostname, ing.Namespace, ing.Name)
} }
for _, path := range rule.HTTP.Paths { for _, path := range rule.HTTP.Paths {
upsName := fmt.Sprintf("%v-%v-%v", upsName := fmt.Sprintf("%v-%v-%v",
ing.GetNamespace(), ing.Namespace,
path.Backend.ServiceName, path.Backend.ServiceName,
path.Backend.ServicePort.String()) path.Backend.ServicePort.String())
ups := upstreams[upsName] ups := upstreams[upsName]
// if there's no path defined we assume /
nginxPath := rootLocation nginxPath := rootLocation
if path.Path != "" { if path.Path != "" {
nginxPath = path.Path nginxPath = path.Path
@ -421,11 +417,11 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
addLoc = false addLoc = false
if !loc.IsDefBackend { if !loc.IsDefBackend {
glog.V(3).Infof("avoiding replacement of ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend) glog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress \"%v/%v\")", loc.Path, server.Hostname, loc.Backend, ing.Namespace, ing.Name)
break break
} }
glog.V(3).Infof("replacing ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend) glog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress \"%v/%v\")", loc.Path, server.Hostname, loc.Backend, ups.Name, ing.Namespace, ing.Name)
loc.Backend = ups.Name loc.Backend = ups.Name
loc.IsDefBackend = false loc.IsDefBackend = false
loc.Port = ups.Port loc.Port = ups.Port
@ -441,7 +437,6 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
loc.Redirect = anns.Redirect loc.Redirect = anns.Redirect
loc.Rewrite = anns.Rewrite loc.Rewrite = anns.Rewrite
loc.UpstreamVhost = anns.UpstreamVhost loc.UpstreamVhost = anns.UpstreamVhost
loc.VtsFilterKey = anns.VtsFilterKey
loc.Whitelist = anns.Whitelist loc.Whitelist = anns.Whitelist
loc.Denied = anns.Denied loc.Denied = anns.Denied
loc.XForwardedPrefix = anns.XForwardedPrefix loc.XForwardedPrefix = anns.XForwardedPrefix
@ -459,9 +454,10 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
break break
} }
} }
// is a new location
// new location
if addLoc { if addLoc {
glog.V(3).Infof("adding location %v in ingress rule %v/%v upstream %v", nginxPath, ing.Namespace, ing.Name, ups.Name) glog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress \"%v/%v\")", nginxPath, server.Hostname, ups.Name, ing.Namespace, ing.Name)
loc := &ingress.Location{ loc := &ingress.Location{
Path: nginxPath, Path: nginxPath,
Backend: ups.Name, Backend: ups.Name,
@ -479,7 +475,6 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
Redirect: anns.Redirect, Redirect: anns.Redirect,
Rewrite: anns.Rewrite, Rewrite: anns.Rewrite,
UpstreamVhost: anns.UpstreamVhost, UpstreamVhost: anns.UpstreamVhost,
VtsFilterKey: anns.VtsFilterKey,
Whitelist: anns.Whitelist, Whitelist: anns.Whitelist,
Denied: anns.Denied, Denied: anns.Denied,
XForwardedPrefix: anns.XForwardedPrefix, XForwardedPrefix: anns.XForwardedPrefix,
@ -525,15 +520,16 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
for _, location := range server.Locations { for _, location := range server.Locations {
if upstream.Name == location.Backend { if upstream.Name == location.Backend {
if len(upstream.Endpoints) == 0 { if len(upstream.Endpoints) == 0 {
glog.V(3).Infof("upstream %v does not have any active endpoints.", upstream.Name) glog.V(3).Infof("Upstream %q does not have any active endpoints.", upstream.Name)
location.Backend = "" // for nginx.tmpl checking
// check if the location contains endpoints and a custom default backend // check if the location contains endpoints and a custom default backend
if location.DefaultBackend != nil { if location.DefaultBackend != nil {
sp := location.DefaultBackend.Spec.Ports[0] sp := location.DefaultBackend.Spec.Ports[0]
endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints) endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints)
if len(endps) > 0 { if len(endps) > 0 {
glog.V(3).Infof("using custom default backend in server %v location %v (service %v/%v)", glog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")",
server.Hostname, location.Path, location.DefaultBackend.Namespace, location.DefaultBackend.Name) location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name)
nb := upstream.DeepCopy() nb := upstream.DeepCopy()
name := fmt.Sprintf("custom-default-backend-%v", upstream.Name) name := fmt.Sprintf("custom-default-backend-%v", upstream.Name)
nb.Name = name nb.Name = name
@ -544,14 +540,12 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
} }
} }
// Configure Backends[].SSLPassthrough
if server.SSLPassthrough { if server.SSLPassthrough {
if location.Path == rootLocation { if location.Path == rootLocation {
if location.Backend == defUpstreamName { if location.Backend == defUpstreamName {
glog.Warningf("ignoring ssl passthrough of %v as it doesn't have a default backend (root context)", server.Hostname) glog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname)
continue continue
} }
isHTTPSfrom = append(isHTTPSfrom, server) isHTTPSfrom = append(isHTTPSfrom, server)
} }
} }
@ -564,7 +558,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
} }
} }
// create the list of upstreams and skip those without endpoints // create the list of upstreams and skip those without Endpoints
for _, upstream := range upstreams { for _, upstream := range upstreams {
if len(upstream.Endpoints) == 0 { if len(upstream.Endpoints) == 0 {
continue continue
@ -591,8 +585,8 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]
return aUpstreams, aServers return aUpstreams, aServers
} }
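As a rough illustration of the relationship getBackendServers builds, the sketch below uses hypothetical stand-ins for ingress.Backend and ingress.Location: each location references an upstream by name, and upstreams without endpoints are dropped from the final list.

package main

import "fmt"

// backend and location are hypothetical stand-ins for ingress.Backend and
// ingress.Location.
type backend struct {
	Name      string
	Endpoints []string
}

type location struct {
	Path    string
	Backend string // name of the upstream serving this path
}

// activeBackends drops upstreams that have no endpoints, mirroring the
// filtering applied before the final lists are returned above.
func activeBackends(upstreams map[string]*backend) []*backend {
	out := []*backend{}
	for _, u := range upstreams {
		if len(u.Endpoints) == 0 {
			continue
		}
		out = append(out, u)
	}
	return out
}

func main() {
	upstreams := map[string]*backend{
		"default-web-80":  {Name: "default-web-80", Endpoints: []string{"10.0.0.5:8080"}},
		"default-idle-80": {Name: "default-idle-80"}, // no endpoints, will be skipped
	}
	loc := location{Path: "/", Backend: "default-web-80"}
	fmt.Println(loc.Backend, len(activeBackends(upstreams))) // default-web-80 1
}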
// createUpstreams creates the NGINX upstreams for each service referenced in // createUpstreams creates the NGINX upstreams (Endpoints) for each Service
// Ingress rules. The servers inside the upstream are endpoints. // referenced in Ingress rules.
func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingress.Backend) map[string]*ingress.Backend { func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingress.Backend) map[string]*ingress.Backend {
upstreams := make(map[string]*ingress.Backend) upstreams := make(map[string]*ingress.Backend)
upstreams[defUpstreamName] = du upstreams[defUpstreamName] = du
@ -600,17 +594,17 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
for _, ing := range data { for _, ing := range data {
anns, err := n.store.GetIngressAnnotations(ing) anns, err := n.store.GetIngressAnnotations(ing)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err) glog.Errorf("Error reading Ingress annotations: %v", err)
} }
var defBackend string var defBackend string
if ing.Spec.Backend != nil { if ing.Spec.Backend != nil {
defBackend = fmt.Sprintf("%v-%v-%v", defBackend = fmt.Sprintf("%v-%v-%v",
ing.GetNamespace(), ing.Namespace,
ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServiceName,
ing.Spec.Backend.ServicePort.String()) ing.Spec.Backend.ServicePort.String())
glog.V(3).Infof("creating upstream %v", defBackend) glog.V(3).Infof("Creating upstream %q", defBackend)
upstreams[defBackend] = newUpstream(defBackend) upstreams[defBackend] = newUpstream(defBackend)
if !upstreams[defBackend].Secure { if !upstreams[defBackend].Secure {
upstreams[defBackend].Secure = anns.SecureUpstream.Secure upstreams[defBackend].Secure = anns.SecureUpstream.Secure
@ -625,14 +619,13 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
upstreams[defBackend].LoadBalancing = anns.LoadBalancing upstreams[defBackend].LoadBalancing = anns.LoadBalancing
} }
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName) svcKey := fmt.Sprintf("%v/%v", ing.Namespace, ing.Spec.Backend.ServiceName)
// Add the service cluster endpoint as the upstream instead of individual endpoints // add the service ClusterIP as a single Endpoint instead of individual Endpoints
// if the serviceUpstream annotation is enabled
if anns.ServiceUpstream { if anns.ServiceUpstream {
endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend) endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend)
if err != nil { if err != nil {
glog.Errorf("Failed to get service cluster endpoint for service %s: %v", svcKey, err) glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
} else { } else {
upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint} upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint}
} }
@ -642,7 +635,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String(), &anns.HealthCheck) endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String(), &anns.HealthCheck)
upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...) upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...)
if err != nil { if err != nil {
glog.Warningf("error creating upstream %v: %v", defBackend, err) glog.Warningf("Error creating upstream %q: %v", defBackend, err)
} }
} }
@ -655,7 +648,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
for _, path := range rule.HTTP.Paths { for _, path := range rule.HTTP.Paths {
name := fmt.Sprintf("%v-%v-%v", name := fmt.Sprintf("%v-%v-%v",
ing.GetNamespace(), ing.Namespace,
path.Backend.ServiceName, path.Backend.ServiceName,
path.Backend.ServicePort.String()) path.Backend.ServicePort.String())
@ -663,7 +656,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
continue continue
} }
glog.V(3).Infof("creating upstream %v", name) glog.V(3).Infof("Creating upstream %q", name)
upstreams[name] = newUpstream(name) upstreams[name] = newUpstream(name)
upstreams[name].Port = path.Backend.ServicePort upstreams[name].Port = path.Backend.ServicePort
@ -683,14 +676,13 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
upstreams[name].LoadBalancing = anns.LoadBalancing upstreams[name].LoadBalancing = anns.LoadBalancing
} }
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) svcKey := fmt.Sprintf("%v/%v", ing.Namespace, path.Backend.ServiceName)
// Add the service cluster endpoint as the upstream instead of individual endpoints // add the service ClusterIP as a single Endpoint instead of individual Endpoints
// if the serviceUpstream annotation is enabled
if anns.ServiceUpstream { if anns.ServiceUpstream {
endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend) endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend)
if err != nil { if err != nil {
glog.Errorf("failed to get service cluster endpoint for service %s: %v", svcKey, err) glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
} else { } else {
upstreams[name].Endpoints = []ingress.Endpoint{endpoint} upstreams[name].Endpoints = []ingress.Endpoint{endpoint}
} }
@ -699,7 +691,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
if len(upstreams[name].Endpoints) == 0 { if len(upstreams[name].Endpoints) == 0 {
endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), &anns.HealthCheck) endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), &anns.HealthCheck)
if err != nil { if err != nil {
glog.Warningf("error obtaining service endpoints: %v", err) glog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err)
continue continue
} }
upstreams[name].Endpoints = endp upstreams[name].Endpoints = endp
@ -707,7 +699,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
s, err := n.store.GetService(svcKey) s, err := n.store.GetService(svcKey)
if err != nil { if err != nil {
glog.Warningf("error obtaining service: %v", err) glog.Warningf("Error obtaining Service %q: %v", svcKey, err)
continue continue
} }
@ -719,20 +711,22 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres
return upstreams return upstreams
} }
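A small sketch of the upstream naming convention used in createUpstreams above; upstreamName is a hypothetical helper, but the "<namespace>-<service>-<port>" format mirrors the fmt.Sprintf calls in the function, which is why two rules pointing at the same Service/port share one upstream.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// upstreamName builds the "<namespace>-<service>-<port>" key used to
// de-duplicate upstreams across Ingress rules.
func upstreamName(namespace, service string, port intstr.IntOrString) string {
	return fmt.Sprintf("%v-%v-%v", namespace, service, port.String())
}

func main() {
	fmt.Println(upstreamName("default", "web", intstr.FromInt(80)))        // default-web-80
	fmt.Println(upstreamName("default", "web", intstr.FromString("http"))) // default-web-http
}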
// getServiceClusterEndpoint returns an Endpoint corresponding to the ClusterIP
// field of a Service.
func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *extensions.IngressBackend) (endpoint ingress.Endpoint, err error) { func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *extensions.IngressBackend) (endpoint ingress.Endpoint, err error) {
svc, err := n.store.GetService(svcKey) svc, err := n.store.GetService(svcKey)
if err != nil { if err != nil {
return endpoint, fmt.Errorf("service %v does not exist", svcKey) return endpoint, fmt.Errorf("service %q does not exist", svcKey)
} }
if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
return endpoint, fmt.Errorf("No ClusterIP found for service %s", svcKey) return endpoint, fmt.Errorf("no ClusterIP found for Service %q", svcKey)
} }
endpoint.Address = svc.Spec.ClusterIP endpoint.Address = svc.Spec.ClusterIP
// If the service port in the ingress uses a name, lookup // if the Service port is referenced by name in the Ingress, lookup the
// the actual port in the service spec // actual port in the service spec
if backend.ServicePort.Type == intstr.String { if backend.ServicePort.Type == intstr.String {
var port int32 = -1 var port int32 = -1
for _, svcPort := range svc.Spec.Ports { for _, svcPort := range svc.Spec.Ports {
@ -742,7 +736,7 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte
} }
} }
if port == -1 { if port == -1 {
return endpoint, fmt.Errorf("no port mapped for service %s and port name %s", svc.Name, backend.ServicePort.String()) return endpoint, fmt.Errorf("service %q does not have a port named %q", svc.Name, backend.ServicePort)
} }
endpoint.Port = fmt.Sprintf("%d", port) endpoint.Port = fmt.Sprintf("%d", port)
} else { } else {
@ -752,27 +746,27 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte
return endpoint, err return endpoint, err
} }
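The named-port lookup above can be sketched in isolation; resolvePort is a hypothetical helper (not part of the controller), but it follows the same logic: numeric references are used directly, named references are resolved against the Service spec.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// resolvePort returns the numeric Service port for an Ingress backend port
// reference, resolving port names against the Service spec.
func resolvePort(svc *corev1.Service, backendPort intstr.IntOrString) (int32, error) {
	if backendPort.Type == intstr.Int {
		return int32(backendPort.IntValue()), nil
	}
	for _, p := range svc.Spec.Ports {
		if p.Name == backendPort.StrVal {
			return p.Port, nil
		}
	}
	return 0, fmt.Errorf("service %q does not have a port named %q", svc.Name, backendPort.String())
}

func main() {
	svc := &corev1.Service{
		Spec: corev1.ServiceSpec{
			ClusterIP: "10.0.0.10",
			Ports:     []corev1.ServicePort{{Name: "http", Port: 80}},
		},
	}
	port, err := resolvePort(svc, intstr.FromString("http"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%s:%d\n", svc.Spec.ClusterIP, port) // 10.0.0.10:80
}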
// serviceEndpoints returns the upstream servers (endpoints) associated // serviceEndpoints returns the upstream servers (Endpoints) associated with a
// to a service. // Service.
func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
hz *healthcheck.Config) ([]ingress.Endpoint, error) { hz *healthcheck.Config) ([]ingress.Endpoint, error) {
svc, err := n.store.GetService(svcKey) svc, err := n.store.GetService(svcKey)
var upstreams []ingress.Endpoint var upstreams []ingress.Endpoint
if err != nil { if err != nil {
return upstreams, fmt.Errorf("error getting service %v from the cache: %v", svcKey, err) return upstreams, fmt.Errorf("error getting Service %q from local store: %v", svcKey, err)
} }
glog.V(3).Infof("obtaining port information for service %v", svcKey) glog.V(3).Infof("Obtaining ports information for Service %q", svcKey)
for _, servicePort := range svc.Spec.Ports { for _, servicePort := range svc.Spec.Ports {
// targetPort could be a string, use the name or the port (int) // targetPort could be a string, use either the port name or number (int)
if strconv.Itoa(int(servicePort.Port)) == backendPort || if strconv.Itoa(int(servicePort.Port)) == backendPort ||
servicePort.TargetPort.String() == backendPort || servicePort.TargetPort.String() == backendPort ||
servicePort.Name == backendPort { servicePort.Name == backendPort {
endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints) endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints)
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("service %v does not have any active endpoints", svcKey) glog.Warningf("Service %q does not have any active Endpoint.", svcKey)
} }
if n.cfg.SortBackends { if n.cfg.SortBackends {
@ -791,11 +785,11 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
} }
} }
// Ingress with an ExternalName service and no port defined in the service. // Ingress with an ExternalName Service and no port defined for that Service
if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName { if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName {
externalPort, err := strconv.Atoi(backendPort) externalPort, err := strconv.Atoi(backendPort)
if err != nil { if err != nil {
glog.Warningf("only numeric ports are allowed in ExternalName services: %v is not valid as a TCP/UDP port", backendPort) glog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort)
return upstreams, nil return upstreams, nil
} }
@ -806,7 +800,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
} }
endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints) endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints)
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("service %v does not have any active endpoints", svcKey) glog.Warningf("Service %q does not have any active Endpoint.", svcKey)
return upstreams, nil return upstreams, nil
} }
@ -825,17 +819,14 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string,
return upstreams, nil return upstreams, nil
} }
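For the ExternalName branch above, a hedged sketch: externalNamePort is a hypothetical helper showing the same constraint that only numeric ports are accepted when the Service defines no ports, and how a synthetic TCP ServicePort could be built from the number.

package main

import (
	"fmt"
	"strconv"

	corev1 "k8s.io/api/core/v1"
)

// externalNamePort parses the port referenced by the Ingress; ExternalName
// Services without declared ports only work with numeric references.
func externalNamePort(backendPort string) (*corev1.ServicePort, error) {
	externalPort, err := strconv.Atoi(backendPort)
	if err != nil {
		return nil, fmt.Errorf("only numeric ports are allowed in ExternalName Services: %q is not a valid port number", backendPort)
	}
	return &corev1.ServicePort{
		Protocol: corev1.ProtocolTCP,
		Port:     int32(externalPort),
	}, nil
}

func main() {
	if _, err := externalNamePort("https"); err != nil {
		fmt.Println(err) // rejected: not numeric
	}
	sp, _ := externalNamePort("443")
	fmt.Println(sp.Port) // 443
}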
// createServers initializes a map that contains information about the list of // createServers builds a map of host name to Server structs from a map of
// FDQN referenced by ingress rules and the common name field in the referenced // already computed Upstream structs. Each Server is configured with at least
// SSL certificates. Each server is configured with location / using a default // one root location, which uses a default backend if left unspecified.
// backend specified by the user or the one inside the ingress spec.
func (n *NGINXController) createServers(data []*extensions.Ingress, func (n *NGINXController) createServers(data []*extensions.Ingress,
upstreams map[string]*ingress.Backend, upstreams map[string]*ingress.Backend,
du *ingress.Backend) map[string]*ingress.Server { du *ingress.Backend) map[string]*ingress.Server {
servers := make(map[string]*ingress.Server, len(data)) servers := make(map[string]*ingress.Server, len(data))
// If a server has a hostname equivalent to a pre-existing alias, then we
// remove the alias to avoid conflicts.
aliases := make(map[string]string, len(data)) aliases := make(map[string]string, len(data))
bdef := n.store.GetDefaultBackend() bdef := n.store.GetDefaultBackend()
@ -858,15 +849,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
defaultPemFileName := n.cfg.FakeCertificatePath defaultPemFileName := n.cfg.FakeCertificatePath
defaultPemSHA := n.cfg.FakeCertificateSHA defaultPemSHA := n.cfg.FakeCertificateSHA
// Tries to fetch the default Certificate from nginx configuration. // read custom default SSL certificate, fall back to generated default certificate
// If it does not exists, use the ones generated on Start()
defaultCertificate, err := n.store.GetLocalSSLCert(n.cfg.DefaultSSLCertificate) defaultCertificate, err := n.store.GetLocalSSLCert(n.cfg.DefaultSSLCertificate)
if err == nil { if err == nil {
defaultPemFileName = defaultCertificate.PemFileName defaultPemFileName = defaultCertificate.PemFileName
defaultPemSHA = defaultCertificate.PemSHA defaultPemSHA = defaultCertificate.PemSHA
} }
// initialize the default server // initialize default server and root location
servers[defServerName] = &ingress.Server{ servers[defServerName] = &ingress.Server{
Hostname: defServerName, Hostname: defServerName,
SSLCert: ingress.SSLCert{ SSLCert: ingress.SSLCert{
@ -883,33 +873,34 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
}, },
}} }}
// initialize all the servers // initialize all other servers
for _, ing := range data { for _, ing := range data {
anns, err := n.store.GetIngressAnnotations(ing) anns, err := n.store.GetIngressAnnotations(ing)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err) glog.Errorf("Error reading Ingress %q annotations from local store: %v", ing.Name, err)
} }
// default upstream server // default upstream name
un := du.Name un := du.Name
if ing.Spec.Backend != nil { if ing.Spec.Backend != nil {
// replace default backend defUpstream := fmt.Sprintf("%v-%v-%v", ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String())
defUpstream := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String())
if backendUpstream, ok := upstreams[defUpstream]; ok { if backendUpstream, ok := upstreams[defUpstream]; ok {
// use backend specified in Ingress as the default backend for all its rules
un = backendUpstream.Name un = backendUpstream.Name
// Special case: // special "catch all" case, Ingress with a backend but no rule
// ingress only with a backend and no rules
// this case defines a "catch all" server
defLoc := servers[defServerName].Locations[0] defLoc := servers[defServerName].Locations[0]
if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 { if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 {
glog.Infof("Ingress \"%v/%v\" defines a backend but no rule. Using it to configure the catch-all server %q", ing.Namespace, ing.Name, defServerName)
defLoc.IsDefBackend = false defLoc.IsDefBackend = false
defLoc.Backend = backendUpstream.Name defLoc.Backend = backendUpstream.Name
defLoc.Service = backendUpstream.Service defLoc.Service = backendUpstream.Service
defLoc.Ingress = ing defLoc.Ingress = ing
// we need to use the ingress annotations // customize using Ingress annotations
defLoc.Logs = anns.Logs defLoc.Logs = anns.Logs
defLoc.BasicDigestAuth = anns.BasicDigestAuth defLoc.BasicDigestAuth = anns.BasicDigestAuth
defLoc.ClientBodyBufferSize = anns.ClientBodyBufferSize defLoc.ClientBodyBufferSize = anns.ClientBodyBufferSize
@ -918,16 +909,17 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
defLoc.ExternalAuth = anns.ExternalAuth defLoc.ExternalAuth = anns.ExternalAuth
defLoc.Proxy = anns.Proxy defLoc.Proxy = anns.Proxy
defLoc.RateLimit = anns.RateLimit defLoc.RateLimit = anns.RateLimit
// TODO: Redirect and rewrite can affect the catch all behavior. Don't use this annotations for now // TODO: Redirect and rewrite can affect the catch all behavior, skip for now
// defLoc.Redirect = anns.Redirect // defLoc.Redirect = anns.Redirect
// defLoc.Rewrite = anns.Rewrite // defLoc.Rewrite = anns.Rewrite
defLoc.UpstreamVhost = anns.UpstreamVhost defLoc.UpstreamVhost = anns.UpstreamVhost
defLoc.VtsFilterKey = anns.VtsFilterKey
defLoc.Whitelist = anns.Whitelist defLoc.Whitelist = anns.Whitelist
defLoc.Denied = anns.Denied defLoc.Denied = anns.Denied
defLoc.GRPC = anns.GRPC defLoc.GRPC = anns.GRPC
defLoc.LuaRestyWAF = anns.LuaRestyWAF defLoc.LuaRestyWAF = anns.LuaRestyWAF
defLoc.InfluxDB = anns.InfluxDB defLoc.InfluxDB = anns.InfluxDB
} else {
glog.V(3).Infof("Ingress \"%v/%v\" defines both a backend and rules. Using its backend as default upstream for all its rules.", ing.Namespace, ing.Name)
} }
} }
} }
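A hypothetical example of the "catch all" shape handled above: an Ingress that defines only a default backend and no rules, which ends up configuring the default server. Names such as fallback-svc are made up for illustration.

package main

import (
	"fmt"

	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// catchAll builds an Ingress with a backend but no rules.
func catchAll() *extensions.Ingress {
	return &extensions.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "catch-all", Namespace: "default"},
		Spec: extensions.IngressSpec{
			Backend: &extensions.IngressBackend{
				ServiceName: "fallback-svc",
				ServicePort: intstr.FromInt(80),
			},
		},
	}
}

func main() {
	ing := catchAll()
	fmt.Printf("%v/%v has backend %v and %d rules\n",
		ing.Namespace, ing.Name, ing.Spec.Backend.ServiceName, len(ing.Spec.Rules))
}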
@ -963,7 +955,7 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
for _, ing := range data { for _, ing := range data {
anns, err := n.store.GetIngressAnnotations(ing) anns, err := n.store.GetIngressAnnotations(ing)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading ingress annotations: %v", err) glog.Errorf("Error reading Ingress %q annotations from local store: %v", ing.Name, err)
} }
for _, rule := range ing.Spec.Rules { for _, rule := range ing.Spec.Rules {
@ -972,7 +964,6 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
host = defServerName host = defServerName
} }
// setup server aliases
if anns.Alias != "" { if anns.Alias != "" {
if servers[host].Alias == "" { if servers[host].Alias == "" {
servers[host].Alias = anns.Alias servers[host].Alias = anns.Alias
@ -980,23 +971,21 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
aliases["Alias"] = host aliases["Alias"] = host
} }
} else { } else {
glog.Warningf("ingress %v/%v for host %v contains an Alias but one has already been configured.", glog.Warningf("Aliases already configured for server %q, skipping (Ingress \"%v/%v\")",
ing.Namespace, ing.Name, host) host, ing.Namespace, ing.Name)
} }
} }
//notifying the user that it has already been configured. if anns.ServerSnippet != "" {
if servers[host].ServerSnippet != "" && anns.ServerSnippet != "" { if servers[host].ServerSnippet == "" {
glog.Warningf("ingress %v/%v for host %v contains a Server Snippet section that it has already been configured.", servers[host].ServerSnippet = anns.ServerSnippet
ing.Namespace, ing.Name, host) } else {
glog.Warningf("Server snippet already configured for server %q, skipping (Ingress \"%v/%v\")",
host, ing.Namespace, ing.Name)
}
} }
// only add a server snippet if the server does not have one previously configured // only add SSL ciphers if the server does not have them previously configured
if servers[host].ServerSnippet == "" && anns.ServerSnippet != "" {
servers[host].ServerSnippet = anns.ServerSnippet
}
// only add ssl ciphers if the server does not have one previously configured
if servers[host].SSLCiphers == "" && anns.SSLCiphers != "" { if servers[host].SSLCiphers == "" && anns.SSLCiphers != "" {
servers[host].SSLCiphers = anns.SSLCiphers servers[host].SSLCiphers = anns.SSLCiphers
} }
@ -1007,14 +996,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
} }
if len(ing.Spec.TLS) == 0 { if len(ing.Spec.TLS) == 0 {
glog.V(3).Infof("ingress %v/%v for host %v does not contains a TLS section", ing.Namespace, ing.Name, host) glog.V(3).Infof("Ingress \"%v/%v\" does not contains a TLS section.", ing.Namespace, ing.Name)
continue continue
} }
tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert) tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert)
if tlsSecretName == "" { if tlsSecretName == "" {
glog.V(3).Infof("host %v is listed on tls section but secretName is empty. Using default cert", host) glog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host)
servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemFileName = defaultPemFileName
servers[host].SSLCert.PemSHA = defaultPemSHA servers[host].SSLCert.PemSHA = defaultPemSHA
continue continue
@ -1023,19 +1012,19 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
key := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName) key := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
cert, err := n.store.GetLocalSSLCert(key) cert, err := n.store.GetLocalSSLCert(key)
if err != nil { if err != nil {
glog.Warningf("ssl certificate \"%v\" does not exist in local store", key) glog.Warningf("SSL certificate %q does not exist in local store.", key)
continue continue
} }
err = cert.Certificate.VerifyHostname(host) err = cert.Certificate.VerifyHostname(host)
if err != nil { if err != nil {
glog.Warningf("unexpected error validating SSL certificate %v for host %v. Reason: %v", key, host, err) glog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", key, host, err)
glog.Warningf("Validating certificate against DNS names. This will be deprecated in a future version.") glog.Warningf("Validating certificate against DNS names. This will be deprecated in a future version.")
// check the common name field // check the Common Name field
// https://github.com/golang/go/issues/22922 // https://github.com/golang/go/issues/22922
err := verifyHostname(host, cert.Certificate) err := verifyHostname(host, cert.Certificate)
if err != nil { if err != nil {
glog.Warningf("ssl certificate %v does not contain a Common Name or Subject Alternative Name for host %v. Reason: %v", key, host, err) glog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", key, host, err)
continue continue
} }
} }
@ -1043,14 +1032,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
servers[host].SSLCert = *cert servers[host].SSLCert = *cert
if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) { if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) {
glog.Warningf("ssl certificate for host %v is about to expire in 10 days", host) glog.Warningf("SSL certificate for server %q is about to expire (%v)", cert.ExpireTime)
} }
} }
} }
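The expiration warning above boils down to a simple time comparison; a minimal sketch where the 240-hour (10-day) window mirrors the code and the host and expiry values are illustrative:

package main

import (
	"fmt"
	"time"
)

// warnIfExpiringSoon warns when a certificate expires within the next
// 10 days (240 hours).
func warnIfExpiringSoon(host string, expireTime time.Time) {
	if expireTime.Before(time.Now().Add(240 * time.Hour)) {
		fmt.Printf("SSL certificate for server %q is about to expire (%v)\n", host, expireTime)
	}
}

func main() {
	warnIfExpiringSoon("example.local", time.Now().Add(72*time.Hour)) // warns: expires in 3 days
}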
for alias, host := range aliases { for alias, host := range aliases {
if _, ok := servers[alias]; ok { if _, ok := servers[alias]; ok {
glog.Warningf("There is a conflict with server hostname '%v' and alias '%v' (in server %v). Removing alias to avoid conflicts.", alias, host) glog.Warningf("Conflicting hostname (%v) and alias (%v) in server %q. Removing alias to avoid conflicts.", alias, host)
servers[host].Alias = "" servers[host].Alias = ""
} }
} }
@ -1058,43 +1047,28 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
return servers return servers
} }
func (n *NGINXController) isForceReload() bool { // extractTLSSecretName returns the name of the Secret containing an SSL
return atomic.LoadInt32(&n.forceReload) != 0 // certificate for the given host name, or an empty string.
}
// SetForceReload sets if the ingress controller should be reloaded or not
func (n *NGINXController) SetForceReload(shouldReload bool) {
if shouldReload {
atomic.StoreInt32(&n.forceReload, 1)
n.syncQueue.Enqueue(&extensions.Ingress{})
} else {
atomic.StoreInt32(&n.forceReload, 0)
}
}
// extractTLSSecretName returns the name of the secret that
// contains a SSL certificate for a particular hostname.
// In case there is no match, an empty string is returned.
func extractTLSSecretName(host string, ing *extensions.Ingress, func extractTLSSecretName(host string, ing *extensions.Ingress,
getLocalSSLCert func(string) (*ingress.SSLCert, error)) string { getLocalSSLCert func(string) (*ingress.SSLCert, error)) string {
if ing == nil { if ing == nil {
return "" return ""
} }
// naively return Secret name from TLS spec if host name matches
for _, tls := range ing.Spec.TLS { for _, tls := range ing.Spec.TLS {
if sets.NewString(tls.Hosts...).Has(host) { if sets.NewString(tls.Hosts...).Has(host) {
return tls.SecretName return tls.SecretName
} }
} }
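The naive lookup in the loop above can be reproduced on its own; firstMatchingSecret is a hypothetical helper, and the sample Ingress and host values are made up:

package main

import (
	"fmt"

	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// firstMatchingSecret returns the secretName of the first TLS entry whose
// host list contains the given host, or an empty string.
func firstMatchingSecret(ing *extensions.Ingress, host string) string {
	for _, tls := range ing.Spec.TLS {
		if sets.NewString(tls.Hosts...).Has(host) {
			return tls.SecretName
		}
	}
	return ""
}

func main() {
	ing := &extensions.Ingress{Spec: extensions.IngressSpec{
		TLS: []extensions.IngressTLS{{Hosts: []string{"foo.bar"}, SecretName: "foo-tls"}},
	}}
	fmt.Println(firstMatchingSecret(ing, "foo.bar")) // foo-tls
}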
// contains a TLS section but none of the host match or there // no TLS host matching host name, try each TLS host for matching CN
// is no hosts in the TLS section. As last resort we valide
// the host against the certificate and we use it if is valid
for _, tls := range ing.Spec.TLS { for _, tls := range ing.Spec.TLS {
key := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName) key := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName)
cert, err := getLocalSSLCert(key) cert, err := getLocalSSLCert(key)
if err != nil { if err != nil {
glog.Warningf("ssl certificate \"%v\" does not exist in local store", key) glog.Warningf("SSL certificate %q does not exist in local store.", key)
continue continue
} }

View file

@ -29,14 +29,9 @@ import (
"k8s.io/ingress-nginx/internal/ingress/annotations/healthcheck" "k8s.io/ingress-nginx/internal/ingress/annotations/healthcheck"
) )
// getEndpoints returns a list of <endpoint ip>:<port> for a given service/target port combination. // getEndpoints returns a list of Endpoint structs for a given service/target port combination.
func getEndpoints( func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Protocol, hz *healthcheck.Config,
s *corev1.Service, getServiceEndpoints func(*corev1.Service) (*corev1.Endpoints, error)) []ingress.Endpoint {
port *corev1.ServicePort,
proto corev1.Protocol,
hz *healthcheck.Config,
getServiceEndpoints func(*corev1.Service) (*corev1.Endpoints, error),
) []ingress.Endpoint {
upsServers := []ingress.Endpoint{} upsServers := []ingress.Endpoint{}
@ -44,26 +39,24 @@ func getEndpoints(
return upsServers return upsServers
} }
// avoid duplicated upstream servers when the service // using a map avoids duplicated upstream servers when the service
// contains multiple port definitions sharing the same // contains multiple port definitions sharing the same target port
// targetport. processedUpstreamServers := make(map[string]struct{})
adus := make(map[string]bool)
// ExternalName services // ExternalName services
if s.Spec.Type == corev1.ServiceTypeExternalName { if s.Spec.Type == corev1.ServiceTypeExternalName {
glog.V(3).Infof("Ingress using a service %v of type=ExternalName : %v", s.Name) glog.V(3).Infof("Ingress using Service %q of type ExternalName.", s.Name)
targetPort := port.TargetPort.IntValue() targetPort := port.TargetPort.IntValue()
// check for invalid port value
if targetPort <= 0 { if targetPort <= 0 {
glog.Errorf("ExternalName service with an invalid port: %v", targetPort) glog.Errorf("ExternalName Service %q has an invalid port (%v)", s.Name, targetPort)
return upsServers return upsServers
} }
if net.ParseIP(s.Spec.ExternalName) == nil { if net.ParseIP(s.Spec.ExternalName) == nil {
_, err := net.LookupHost(s.Spec.ExternalName) _, err := net.LookupHost(s.Spec.ExternalName)
if err != nil { if err != nil {
glog.Errorf("unexpected error resolving host %v: %v", s.Spec.ExternalName, err) glog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, err)
return upsServers return upsServers
} }
} }
@ -76,10 +69,10 @@ func getEndpoints(
}) })
} }
glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, port.String()) glog.V(3).Infof("Getting Endpoints for Service \"%v/%v\" and port %v", s.Namespace, s.Name, port.String())
ep, err := getServiceEndpoints(s) ep, err := getServiceEndpoints(s)
if err != nil { if err != nil {
glog.Warningf("unexpected error obtaining service endpoints: %v", err) glog.Warningf("Error obtaining Endpoints for Service \"%v/%v\": %v", s.Namespace, s.Name, err)
return upsServers return upsServers
} }
@ -99,14 +92,13 @@ func getEndpoints(
targetPort = epPort.Port targetPort = epPort.Port
} }
// check for invalid port value
if targetPort <= 0 { if targetPort <= 0 {
continue continue
} }
for _, epAddress := range ss.Addresses { for _, epAddress := range ss.Addresses {
ep := fmt.Sprintf("%v:%v", epAddress.IP, targetPort) ep := fmt.Sprintf("%v:%v", epAddress.IP, targetPort)
if _, exists := adus[ep]; exists { if _, exists := processedUpstreamServers[ep]; exists {
continue continue
} }
ups := ingress.Endpoint{ ups := ingress.Endpoint{
@ -117,11 +109,11 @@ func getEndpoints(
Target: epAddress.TargetRef, Target: epAddress.TargetRef,
} }
upsServers = append(upsServers, ups) upsServers = append(upsServers, ups)
adus[ep] = true processedUpstreamServers[ep] = struct{}{}
} }
} }
} }
glog.V(3).Infof("endpoints found: %v", upsServers) glog.V(3).Infof("Endpoints found for Service \"%v/%v\": %v", s.Namespace, s.Name, upsServers)
return upsServers return upsServers
} }
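The map-based de-duplication above is a standard Go set idiom; a self-contained sketch with made-up addresses:

package main

import "fmt"

// dedupeEndpoints adds each "ip:port" pair only once, even if several
// Service ports share the same target port.
func dedupeEndpoints(addrs []string, port int) []string {
	seen := make(map[string]struct{})
	var out []string
	for _, ip := range addrs {
		ep := fmt.Sprintf("%v:%v", ip, port)
		if _, ok := seen[ep]; ok {
			continue
		}
		seen[ep] = struct{}{}
		out = append(out, ep)
	}
	return out
}

func main() {
	fmt.Println(dedupeEndpoints([]string{"10.0.0.1", "10.0.0.1", "10.0.0.2"}, 8080))
	// [10.0.0.1:8080 10.0.0.2:8080]
}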

View file

@ -1,30 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collector
import "github.com/prometheus/client_golang/prometheus"
// Stopable defines a prometheus collector that can be stopped
type Stopable interface {
prometheus.Collector
Stop()
}
type scrapeRequest struct {
results chan<- prometheus.Metric
done chan struct{}
}

View file

@ -1,225 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collector
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"regexp"
"strconv"
"github.com/golang/glog"
)
var (
ac = regexp.MustCompile(`Active connections: (\d+)`)
sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`)
reading = regexp.MustCompile(`Reading: (\d+)`)
writing = regexp.MustCompile(`Writing: (\d+)`)
waiting = regexp.MustCompile(`Waiting: (\d+)`)
)
type basicStatus struct {
// Active total number of active connections
Active int
// Accepted total number of accepted client connections
Accepted int
// Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).
Handled int
// Requests total number of client requests.
Requests int
// Reading current number of connections where nginx is reading the request header.
Reading int
// Writing current number of connections where nginx is writing the response back to the client.
Writing int
// Waiting current number of idle client connections waiting for a request.
Waiting int
}
// https://github.com/vozlt/nginx-module-vts
type vts struct {
NginxVersion string `json:"nginxVersion"`
LoadMsec int `json:"loadMsec"`
NowMsec int `json:"nowMsec"`
// Total connections and requests(same as stub_status_module in NGINX)
Connections connections `json:"connections"`
// Traffic(in/out) and request and response counts and cache hit ratio per each server zone
ServerZones map[string]serverZone `json:"serverZones"`
// Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through
// the vhost_traffic_status_filter_by_set_key directive
FilterZones map[string]map[string]filterZone `json:"filterZones"`
// Traffic(in/out) and request and response counts per server in each upstream group
UpstreamZones map[string][]upstreamZone `json:"upstreamZones"`
}
type serverZone struct {
RequestCounter float64 `json:"requestCounter"`
InBytes float64 `json:"inBytes"`
OutBytes float64 `json:"outBytes"`
Responses response `json:"responses"`
Cache cache `json:"cache"`
}
type filterZone struct {
RequestCounter float64 `json:"requestCounter"`
InBytes float64 `json:"inBytes"`
OutBytes float64 `json:"outBytes"`
Cache cache `json:"cache"`
Responses response `json:"responses"`
}
type upstreamZone struct {
Responses response `json:"responses"`
Server string `json:"server"`
RequestCounter float64 `json:"requestCounter"`
InBytes float64 `json:"inBytes"`
OutBytes float64 `json:"outBytes"`
ResponseMsec float64 `json:"responseMsec"`
Weight float64 `json:"weight"`
MaxFails float64 `json:"maxFails"`
FailTimeout float64 `json:"failTimeout"`
Backup BoolToFloat64 `json:"backup"`
Down BoolToFloat64 `json:"down"`
}
type cache struct {
Miss float64 `json:"miss"`
Bypass float64 `json:"bypass"`
Expired float64 `json:"expired"`
Stale float64 `json:"stale"`
Updating float64 `json:"updating"`
Revalidated float64 `json:"revalidated"`
Hit float64 `json:"hit"`
Scarce float64 `json:"scarce"`
}
type response struct {
OneXx float64 `json:"1xx"`
TwoXx float64 `json:"2xx"`
TheeXx float64 `json:"3xx"`
FourXx float64 `json:"4xx"`
FiveXx float64 `json:"5xx"`
}
type connections struct {
Active float64 `json:"active"`
Reading float64 `json:"reading"`
Writing float64 `json:"writing"`
Waiting float64 `json:"waiting"`
Accepted float64 `json:"accepted"`
Handled float64 `json:"handled"`
Requests float64 `json:"requests"`
}
// BoolToFloat64 ...
type BoolToFloat64 float64
// UnmarshalJSON ...
func (bit BoolToFloat64) UnmarshalJSON(data []byte) error {
asString := string(data)
if asString == "1" || asString == "true" {
bit = 1
} else if asString == "0" || asString == "false" {
bit = 0
} else {
return fmt.Errorf(fmt.Sprintf("boolean unmarshal error: invalid input %s", asString))
}
return nil
}
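A hedged variant of the custom unmarshalling above: for the decoded value to persist, encoding/json requires the UnmarshalJSON method to be defined on a pointer receiver, as in this self-contained sketch (the field name and sample JSON are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// BoolToFloat64 decodes JSON booleans (or "0"/"1") into a float64 value.
type BoolToFloat64 float64

func (bit *BoolToFloat64) UnmarshalJSON(data []byte) error {
	switch string(data) {
	case "1", "true":
		*bit = 1
	case "0", "false":
		*bit = 0
	default:
		return fmt.Errorf("boolean unmarshal error: invalid input %s", data)
	}
	return nil
}

func main() {
	var v struct {
		Down BoolToFloat64 `json:"down"`
	}
	if err := json.Unmarshal([]byte(`{"down": true}`), &v); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(float64(v.Down)) // 1
}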
func getNginxStatus(port int, path string) (*basicStatus, error) {
url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path)
glog.V(3).Infof("start scraping url: %v", url)
data, err := httpBody(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
}
return parse(string(data)), nil
}
func httpBody(url string) ([]byte, error) {
resp, err := http.DefaultClient.Get(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx : %v", err)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err)
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode)
}
return data, nil
}
func getNginxVtsMetrics(port int, path string) (*vts, error) {
url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path)
glog.V(3).Infof("start scraping url: %v", url)
data, err := httpBody(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err)
}
var vts *vts
err = json.Unmarshal(data, &vts)
if err != nil {
return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err)
}
glog.V(3).Infof("scrape returned : %v", vts)
return vts, nil
}
func parse(data string) *basicStatus {
acr := ac.FindStringSubmatch(data)
sahrr := sahr.FindStringSubmatch(data)
readingr := reading.FindStringSubmatch(data)
writingr := writing.FindStringSubmatch(data)
waitingr := waiting.FindStringSubmatch(data)
return &basicStatus{
toInt(acr, 1),
toInt(sahrr, 1),
toInt(sahrr, 2),
toInt(sahrr, 3),
toInt(readingr, 1),
toInt(writingr, 1),
toInt(waitingr, 1),
}
}
func toInt(data []string, pos int) int {
if len(data) == 0 {
return 0
}
if pos > len(data) {
return 0
}
if v, err := strconv.Atoi(data[pos]); err == nil {
return v
}
return 0
}

View file

@ -1,72 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collector
import (
"testing"
"github.com/kylelemons/godebug/pretty"
)
func TestParseStatus(t *testing.T) {
tests := []struct {
in string
out *basicStatus
}{
{`Active connections: 43
server accepts handled requests
7368 7368 10993
Reading: 0 Writing: 5 Waiting: 38`,
&basicStatus{43, 7368, 7368, 10993, 0, 5, 38},
},
{`Active connections: 0
server accepts handled requests
1 7 0
Reading: A Writing: B Waiting: 38`,
&basicStatus{0, 1, 7, 0, 0, 0, 38},
},
}
for _, test := range tests {
r := parse(test.in)
if diff := pretty.Compare(r, test.out); diff != "" {
t.Logf("%v", diff)
t.Fatalf("expected %v but returned %v", test.out, r)
}
}
}
func TestToint(t *testing.T) {
tests := []struct {
in []string
pos int
exp int
}{
{[]string{}, 0, 0},
{[]string{}, 1, 0},
{[]string{"A"}, 0, 0},
{[]string{"1"}, 0, 1},
{[]string{"a", "2"}, 1, 2},
}
for _, test := range tests {
v := toInt(test.in, test.pos)
if v != test.exp {
t.Fatalf("expected %v but returned %v", test.exp, v)
}
}
}

View file

@ -1,273 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collector
import (
"reflect"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
)
const ns = "nginx"
type (
vtsCollector struct {
scrapeChan chan scrapeRequest
port int
path string
data *vtsData
watchNamespace string
ingressClass string
}
vtsData struct {
bytes *prometheus.Desc
cache *prometheus.Desc
connections *prometheus.Desc
responses *prometheus.Desc
requests *prometheus.Desc
filterZoneBytes *prometheus.Desc
filterZoneResponses *prometheus.Desc
filterZoneCache *prometheus.Desc
upstreamBackup *prometheus.Desc
upstreamBytes *prometheus.Desc
upstreamDown *prometheus.Desc
upstreamFailTimeout *prometheus.Desc
upstreamMaxFails *prometheus.Desc
upstreamResponses *prometheus.Desc
upstreamRequests *prometheus.Desc
upstreamResponseMsec *prometheus.Desc
upstreamWeight *prometheus.Desc
}
)
// NewNGINXVTSCollector returns a new prometheus collector for the VTS module
func NewNGINXVTSCollector(watchNamespace, ingressClass string, port int, path string) Stopable {
p := vtsCollector{
scrapeChan: make(chan scrapeRequest),
port: port,
path: path,
watchNamespace: watchNamespace,
ingressClass: ingressClass,
}
p.data = &vtsData{
bytes: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "bytes_total"),
"Nginx bytes count",
[]string{"ingress_class", "namespace", "server_zone", "direction"}, nil),
cache: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "cache_total"),
"Nginx cache count",
[]string{"ingress_class", "namespace", "server_zone", "type"}, nil),
connections: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "connections_total"),
"Nginx connections count",
[]string{"ingress_class", "namespace", "type"}, nil),
responses: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "responses_total"),
"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
[]string{"ingress_class", "namespace", "server_zone", "status_code"}, nil),
requests: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "requests_total"),
"The total number of requested client connections.",
[]string{"ingress_class", "namespace", "server_zone"}, nil),
filterZoneBytes: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "filterzone_bytes_total"),
"Nginx bytes count",
[]string{"ingress_class", "namespace", "server_zone", "key", "direction"}, nil),
filterZoneResponses: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "filterzone_responses_total"),
"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
[]string{"ingress_class", "namespace", "server_zone", "key", "status_code"}, nil),
filterZoneCache: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "filterzone_cache_total"),
"Nginx cache count",
[]string{"ingress_class", "namespace", "server_zone", "key", "type"}, nil),
upstreamBackup: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_backup"),
"Current backup setting of the server.",
[]string{"ingress_class", "namespace", "upstream", "server"}, nil),
upstreamBytes: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_bytes_total"),
"The total number of bytes sent to this server.",
[]string{"ingress_class", "namespace", "upstream", "server", "direction"}, nil),
upstreamDown: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "vts_upstream_down_total"),
"Current down setting of the server.",
[]string{"ingress_class", "namespace", "upstream", "server"}, nil),
upstreamFailTimeout: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_fail_timeout"),
"Current fail_timeout setting of the server.",
[]string{"ingress_class", "namespace", "upstream", "server"}, nil),
upstreamMaxFails: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_maxfails"),
"Current max_fails setting of the server.",
[]string{"ingress_class", "namespace", "upstream", "server"}, nil),
upstreamResponses: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_responses_total"),
"The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
[]string{"ingress_class", "namespace", "upstream", "server", "status_code"}, nil),
upstreamRequests: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_requests_total"),
"The total number of client connections forwarded to this server.",
[]string{"ingress_class", "namespace", "upstream", "server"}, nil),
upstreamResponseMsec: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_response_msecs_avg"),
"The average of only upstream response processing times in milliseconds.",
[]string{"ingress_class", "namespace", "upstream", "server"}, nil),
upstreamWeight: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "upstream_weight"),
"Current upstream weight setting of the server.",
[]string{"ingress_class", "namespace", "upstream", "server"}, nil),
}
go p.start()
return p
}
// Describe implements prometheus.Collector.
func (p vtsCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- p.data.bytes
ch <- p.data.cache
ch <- p.data.connections
ch <- p.data.requests
ch <- p.data.responses
ch <- p.data.upstreamBackup
ch <- p.data.upstreamBytes
ch <- p.data.upstreamDown
ch <- p.data.upstreamFailTimeout
ch <- p.data.upstreamMaxFails
ch <- p.data.upstreamRequests
ch <- p.data.upstreamResponseMsec
ch <- p.data.upstreamResponses
ch <- p.data.upstreamWeight
ch <- p.data.filterZoneBytes
ch <- p.data.filterZoneCache
ch <- p.data.filterZoneResponses
}
// Collect implements prometheus.Collector.
func (p vtsCollector) Collect(ch chan<- prometheus.Metric) {
req := scrapeRequest{results: ch, done: make(chan struct{})}
p.scrapeChan <- req
<-req.done
}
func (p vtsCollector) start() {
for req := range p.scrapeChan {
ch := req.results
p.scrapeVts(ch)
req.done <- struct{}{}
}
}
func (p vtsCollector) Stop() {
close(p.scrapeChan)
}
// scrapeVts scrape nginx vts metrics
func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) {
nginxMetrics, err := getNginxVtsMetrics(p.port, p.path)
if err != nil {
glog.Warningf("unexpected error obtaining nginx status info: %v", err)
return
}
reflectMetrics(&nginxMetrics.Connections, p.data.connections, ch, p.ingressClass, p.watchNamespace)
for name, zones := range nginxMetrics.UpstreamZones {
for pos, value := range zones {
reflectMetrics(&zones[pos].Responses, p.data.upstreamResponses, ch, p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamRequests,
prometheus.CounterValue, zones[pos].RequestCounter, p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamDown,
prometheus.CounterValue, float64(zones[pos].Down), p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamWeight,
prometheus.CounterValue, zones[pos].Weight, p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamResponseMsec,
prometheus.CounterValue, zones[pos].ResponseMsec, p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamBackup,
prometheus.CounterValue, float64(zones[pos].Backup), p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamFailTimeout,
prometheus.CounterValue, zones[pos].FailTimeout, p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamMaxFails,
prometheus.CounterValue, zones[pos].MaxFails, p.ingressClass, p.watchNamespace, name, value.Server)
ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes,
prometheus.CounterValue, zones[pos].InBytes, p.ingressClass, p.watchNamespace, name, value.Server, "in")
ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes,
prometheus.CounterValue, zones[pos].OutBytes, p.ingressClass, p.watchNamespace, name, value.Server, "out")
}
}
for name, zone := range nginxMetrics.ServerZones {
reflectMetrics(&zone.Responses, p.data.responses, ch, p.ingressClass, p.watchNamespace, name)
reflectMetrics(&zone.Cache, p.data.cache, ch, p.ingressClass, p.watchNamespace, name)
ch <- prometheus.MustNewConstMetric(p.data.requests,
prometheus.CounterValue, zone.RequestCounter, p.ingressClass, p.watchNamespace, name)
ch <- prometheus.MustNewConstMetric(p.data.bytes,
prometheus.CounterValue, zone.InBytes, p.ingressClass, p.watchNamespace, name, "in")
ch <- prometheus.MustNewConstMetric(p.data.bytes,
prometheus.CounterValue, zone.OutBytes, p.ingressClass, p.watchNamespace, name, "out")
}
for serverZone, keys := range nginxMetrics.FilterZones {
for name, zone := range keys {
reflectMetrics(&zone.Responses, p.data.filterZoneResponses, ch, p.ingressClass, p.watchNamespace, serverZone, name)
reflectMetrics(&zone.Cache, p.data.filterZoneCache, ch, p.ingressClass, p.watchNamespace, serverZone, name)
ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes,
prometheus.CounterValue, zone.InBytes, p.ingressClass, p.watchNamespace, serverZone, name, "in")
ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes,
prometheus.CounterValue, zone.OutBytes, p.ingressClass, p.watchNamespace, serverZone, name, "out")
}
}
}
func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
val := reflect.ValueOf(value).Elem()
for i := 0; i < val.NumField(); i++ {
tag := val.Type().Field(i).Tag
l := append(labels, tag.Get("json"))
ch <- prometheus.MustNewConstMetric(desc,
prometheus.CounterValue, val.Field(i).Interface().(float64),
l...)
}
}

View file

@ -38,7 +38,6 @@ import (
proxyproto "github.com/armon/go-proxyproto" proxyproto "github.com/armon/go-proxyproto"
"github.com/eapache/channels" "github.com/eapache/channels"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
@ -65,26 +64,14 @@ type statusModule string
const ( const (
ngxHealthPath = "/healthz" ngxHealthPath = "/healthz"
defaultStatusModule statusModule = "default"
vtsStatusModule statusModule = "vts"
) )
var ( var (
tmplPath = "/etc/nginx/template/nginx.tmpl" tmplPath = "/etc/nginx/template/nginx.tmpl"
cfgPath = "/etc/nginx/nginx.conf"
nginxBinary = "/usr/sbin/nginx"
) )
// NewNGINXController creates a new NGINX Ingress controller. // NewNGINXController creates a new NGINX Ingress controller.
// If the environment variable NGINX_BINARY exists it will be used
// as source for nginx commands
func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController { func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController {
ngx := os.Getenv("NGINX_BINARY")
if ngx == "" {
ngx = nginxBinary
}
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
@ -93,12 +80,10 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
h, err := dns.GetSystemNameServers() h, err := dns.GetSystemNameServers()
if err != nil { if err != nil {
glog.Warningf("unexpected error reading system nameservers: %v", err) glog.Warningf("Error reading system nameservers: %v", err)
} }
n := &NGINXController{ n := &NGINXController{
binary: ngx,
isIPV6Enabled: ing_net.IsIPv6Enabled(), isIPV6Enabled: ing_net.IsIPv6Enabled(),
resolver: h, resolver: h,
@ -116,8 +101,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
fileSystem: fs, fileSystem: fs,
// create an empty configuration. runningConfig: new(ingress.Configuration),
runningConfig: &ingress.Configuration{},
Proxy: &TCPProxy{}, Proxy: &TCPProxy{},
} }
@ -134,8 +118,6 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
fs, fs,
n.updateCh) n.updateCh)
n.stats = newStatsCollector(config.Namespace, class.IngressClass, n.binary, n.cfg.ListenPorts.Status)
n.syncQueue = task.NewTaskQueue(n.syncIngress) n.syncQueue = task.NewTaskQueue(n.syncIngress)
n.annotations = annotations.NewAnnotationExtractor(n.store) n.annotations = annotations.NewAnnotationExtractor(n.store)
@ -153,7 +135,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
UseNodeInternalIP: config.UseNodeInternalIP, UseNodeInternalIP: config.UseNodeInternalIP,
}) })
} else { } else {
glog.Warning("Update of ingress status is disabled (flag --update-status=false was specified)") glog.Warning("Update of Ingress status is disabled (flag --update-status)")
} }
onTemplateChange := func() { onTemplateChange := func() {
@ -162,68 +144,66 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
// this error is different from the rest because it must be clear why nginx is not working // this error is different from the rest because it must be clear why nginx is not working
glog.Errorf(` glog.Errorf(`
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
Error loading new template : %v Error loading new template: %v
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
`, err) `, err)
return return
} }
n.t = template n.t = template
glog.Info("new NGINX template loaded") glog.Info("New NGINX configuration template loaded.")
n.SetForceReload(true) n.syncQueue.EnqueueTask(task.GetDummyObject("template-change"))
} }
ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs) ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs)
if err != nil { if err != nil {
glog.Fatalf("invalid NGINX template: %v", err) glog.Fatalf("Invalid NGINX configuration template: %v", err)
} }
n.t = ngxTpl n.t = ngxTpl
// TODO: refactor
if _, ok := fs.(filesystem.DefaultFs); !ok { if _, ok := fs.(filesystem.DefaultFs); !ok {
watch.NewDummyFileWatcher(tmplPath, onTemplateChange) // do not setup watchers on tests
} else { return n
}
_, err = watch.NewFileWatcher(tmplPath, onTemplateChange) _, err = watch.NewFileWatcher(tmplPath, onTemplateChange)
if err != nil {
glog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err)
}
filesToWatch := []string{}
err = filepath.Walk("/etc/nginx/geoip/", func(path string, info os.FileInfo, err error) error {
if err != nil { if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err) return err
} }
filesToWatch := []string{} if info.IsDir() {
err := filepath.Walk("/etc/nginx/geoip/", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
filesToWatch = append(filesToWatch, path)
return nil return nil
}
filesToWatch = append(filesToWatch, path)
return nil
})
if err != nil {
glog.Fatalf("Error creating file watchers: %v", err)
}
for _, f := range filesToWatch {
_, err = watch.NewFileWatcher(f, func() {
glog.Info("File %v changed. Reloading NGINX", f)
n.syncQueue.EnqueueTask(task.GetDummyObject("file-change"))
}) })
if err != nil { if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err) glog.Fatalf("Error creating file watcher for %v: %v", f, err)
} }
for _, f := range filesToWatch {
_, err = watch.NewFileWatcher(f, func() {
glog.Info("file %v changed. Reloading NGINX", f)
n.SetForceReload(true)
})
if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err)
}
}
} }
return n return n
} }
// NGINXController ... // NGINXController describes a NGINX Ingress controller.
type NGINXController struct { type NGINXController struct {
cfg *Configuration cfg *Configuration
@ -237,30 +217,24 @@ type NGINXController struct {
syncRateLimiter flowcontrol.RateLimiter syncRateLimiter flowcontrol.RateLimiter
// stopLock is used to enforce only a single call to Stop is active. // stopLock is used to enforce that only a single call to Stop send at
// Needed because we allow stopping through an http endpoint and // a given time. We allow stopping through an HTTP endpoint and
// allowing concurrent stoppers leads to stack traces. // allowing concurrent stoppers leads to stack traces.
stopLock *sync.Mutex stopLock *sync.Mutex
stopCh chan struct{} stopCh chan struct{}
updateCh *channels.RingChannel updateCh *channels.RingChannel
// ngxErrCh channel used to detect errors with the nginx processes // ngxErrCh is used to detect errors with the NGINX processes
ngxErrCh chan error ngxErrCh chan error
// runningConfig contains the running configuration in the Backend // runningConfig contains the running configuration in the Backend
runningConfig *ingress.Configuration runningConfig *ingress.Configuration
forceReload int32
t *ngx_template.Template t *ngx_template.Template
binary string
resolver []net.IP resolver []net.IP
stats *statsCollector
statusModule statusModule
// returns true if IPV6 is enabled in the pod // returns true if IPV6 is enabled in the pod
isIPV6Enabled bool isIPV6Enabled bool
@ -273,9 +247,9 @@ type NGINXController struct {
fileSystem filesystem.Filesystem fileSystem filesystem.Filesystem
} }
// Start start a new NGINX master process running in foreground. // Start starts a new NGINX master process running in the foreground.
func (n *NGINXController) Start() { func (n *NGINXController) Start() {
glog.Infof("starting Ingress controller") glog.Infof("Starting NGINX Ingress controller")
n.store.Run(n.stopCh) n.store.Run(n.stopCh)
@ -283,9 +257,9 @@ func (n *NGINXController) Start() {
go n.syncStatus.Run() go n.syncStatus.Run()
} }
cmd := exec.Command(n.binary, "-c", cfgPath) cmd := nginxExecCommand()
// put nginx in another process group to prevent it // put NGINX in another process group to prevent it
// to receive signals meant for the controller // to receive signals meant for the controller
cmd.SysProcAttr = &syscall.SysProcAttr{ cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true, Setpgid: true,
@ -296,12 +270,12 @@ func (n *NGINXController) Start() {
n.setupSSLProxy() n.setupSSLProxy()
} }
glog.Info("starting NGINX process...") glog.Info("Starting NGINX process")
n.start(cmd) n.start(cmd)
go n.syncQueue.Run(time.Second, n.stopCh) go n.syncQueue.Run(time.Second, n.stopCh)
// force initial sync // force initial sync
n.syncQueue.Enqueue(&extensions.Ingress{}) n.syncQueue.EnqueueTask(task.GetDummyObject("initial-sync"))
for { for {
select { select {
@ -320,7 +294,7 @@ func (n *NGINXController) Start() {
// release command resources // release command resources
cmd.Process.Release() cmd.Process.Release()
// start a new nginx master process if the controller is not being stopped // start a new nginx master process if the controller is not being stopped
cmd = exec.Command(n.binary, "-c", cfgPath) cmd = nginxExecCommand()
cmd.SysProcAttr = &syscall.SysProcAttr{ cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true, Setpgid: true,
Pgid: 0, Pgid: 0,
@ -334,12 +308,14 @@ func (n *NGINXController) Start() {
if evt, ok := event.(store.Event); ok { if evt, ok := event.(store.Event); ok {
glog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj) glog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj)
if evt.Type == store.ConfigurationEvent { if evt.Type == store.ConfigurationEvent {
n.SetForceReload(true) // TODO: is this necessary? Consider removing this special case
n.syncQueue.EnqueueTask(task.GetDummyObject("configmap-change"))
continue
} }
n.syncQueue.Enqueue(evt.Obj) n.syncQueue.EnqueueSkippableTask(evt.Obj)
} else { } else {
glog.Warningf("unexpected event type received %T", event) glog.Warningf("Unexpected event type received %T", event)
} }
case <-n.stopCh: case <-n.stopCh:
break break
@ -354,12 +330,11 @@ func (n *NGINXController) Stop() error {
n.stopLock.Lock() n.stopLock.Lock()
defer n.stopLock.Unlock() defer n.stopLock.Unlock()
// Only try draining the workqueue if we haven't already.
if n.syncQueue.IsShuttingDown() { if n.syncQueue.IsShuttingDown() {
return fmt.Errorf("shutdown already in progress") return fmt.Errorf("shutdown already in progress")
} }
glog.Infof("shutting down controller queues") glog.Infof("Shutting down controller queues")
close(n.stopCh) close(n.stopCh)
go n.syncQueue.Shutdown() go n.syncQueue.Shutdown()
if n.syncStatus != nil { if n.syncStatus != nil {
@ -368,7 +343,7 @@ func (n *NGINXController) Stop() error {
// Send stop signal to Nginx // Send stop signal to Nginx
glog.Info("stopping NGINX process...") glog.Info("stopping NGINX process...")
cmd := exec.Command(n.binary, "-c", cfgPath, "-s", "quit") cmd := nginxExecCommand("-s", "quit")
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
err := cmd.Run() err := cmd.Run()
@ -376,7 +351,7 @@ func (n *NGINXController) Stop() error {
return err return err
} }
// Wait for the Nginx process disappear // wait for the NGINX process to terminate
timer := time.NewTicker(time.Second * 1) timer := time.NewTicker(time.Second * 1)
for range timer.C { for range timer.C {
if !process.IsNginxRunning() { if !process.IsNginxRunning() {
@ -393,7 +368,7 @@ func (n *NGINXController) start(cmd *exec.Cmd) {
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil { if err := cmd.Start(); err != nil {
glog.Fatalf("nginx error: %v", err) glog.Fatalf("NGINX error: %v", err)
n.ngxErrCh <- err n.ngxErrCh <- err
return return
} }
@ -416,18 +391,18 @@ func (n NGINXController) DefaultEndpoint() ingress.Endpoint {
// running the command "nginx -t" using a temporal file. // running the command "nginx -t" using a temporal file.
func (n NGINXController) testTemplate(cfg []byte) error { func (n NGINXController) testTemplate(cfg []byte) error {
if len(cfg) == 0 { if len(cfg) == 0 {
return fmt.Errorf("invalid nginx configuration (empty)") return fmt.Errorf("Invalid NGINX configuration (empty)")
} }
tmpfile, err := ioutil.TempFile("", "nginx-cfg") tmpfile, err := ioutil.TempFile("", "nginx-cfg")
if err != nil { if err != nil {
return err return err
} }
defer tmpfile.Close() defer tmpfile.Close()
err = ioutil.WriteFile(tmpfile.Name(), cfg, 0644) err = ioutil.WriteFile(tmpfile.Name(), cfg, file.ReadWriteByUser)
if err != nil { if err != nil {
return err return err
} }
out, err := exec.Command(n.binary, "-t", "-c", tmpfile.Name()).CombinedOutput() out, err := nginxTestCommand(tmpfile.Name()).CombinedOutput()
if err != nil { if err != nil {
// this error is different from the rest because it must be clear why nginx is not working // this error is different from the rest because it must be clear why nginx is not working
oe := fmt.Sprintf(` oe := fmt.Sprintf(`
@ -443,14 +418,10 @@ Error: %v
return nil return nil
} }
// OnUpdate is called periodically by syncQueue to keep the configuration in sync. // OnUpdate is called by the synchronization loop whenever configuration
// // changes were detected. The received backend Configuration is merged with the
// 1. converts configmap configuration to custom configuration object // configuration ConfigMap before generating the final configuration file.
// 2. write the custom template (the complexity depends on the implementation) // Returns nil in case the backend was successfully reloaded.
// 3. write the configuration file
//
// returning nil implies the backend will be reloaded.
// if an error is returned means requeue the update
func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
cfg := n.store.GetBackendConfiguration() cfg := n.store.GetBackendConfiguration()
cfg.Resolver = n.resolver cfg.Resolver = n.resolver
@ -460,7 +431,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
for _, pb := range ingressCfg.PassthroughBackends { for _, pb := range ingressCfg.PassthroughBackends {
svc := pb.Service svc := pb.Service
if svc == nil { if svc == nil {
glog.Warningf("missing service for PassthroughBackends %v", pb.Backend) glog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend)
continue continue
} }
port, err := strconv.Atoi(pb.Port.String()) port, err := strconv.Atoi(pb.Port.String())
@ -480,7 +451,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
} }
} }
//TODO: Allow PassthroughBackends to specify they support proxy-protocol // TODO: Allow PassthroughBackends to specify they support proxy-protocol
servers = append(servers, &TCPServer{ servers = append(servers, &TCPServer{
Hostname: pb.Hostname, Hostname: pb.Hostname,
IP: svc.Spec.ClusterIP, IP: svc.Spec.ClusterIP,
@ -492,13 +463,6 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
n.Proxy.ServerList = servers n.Proxy.ServerList = servers
} }
// we need to check if the status module configuration changed
if cfg.EnableVtsStatus {
n.setupMonitor(vtsStatusModule)
} else {
n.setupMonitor(defaultStatusModule)
}
// NGINX cannot resize the hash tables used to store server names. // NGINX cannot resize the hash tables used to store server names.
// For this reason we check if the defined size defined is correct // For this reason we check if the defined size defined is correct
// for the FQDN defined in the ingress rules adjusting the value // for the FQDN defined in the ingress rules adjusting the value
@ -520,7 +484,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
} else { } else {
n = fmt.Sprintf("www.%v", srv.Hostname) n = fmt.Sprintf("www.%v", srv.Hostname)
} }
glog.V(3).Infof("creating redirect from %v to %v", srv.Hostname, n) glog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n)
if _, ok := redirectServers[n]; !ok { if _, ok := redirectServers[n]; !ok {
found := false found := false
for _, esrv := range ingressCfg.Servers { for _, esrv := range ingressCfg.Servers {
@ -537,24 +501,24 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
} }
if cfg.ServerNameHashBucketSize == 0 { if cfg.ServerNameHashBucketSize == 0 {
nameHashBucketSize := nginxHashBucketSize(longestName) nameHashBucketSize := nginxHashBucketSize(longestName)
glog.V(3).Infof("adjusting ServerNameHashBucketSize variable to %v", nameHashBucketSize) glog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %q", nameHashBucketSize)
cfg.ServerNameHashBucketSize = nameHashBucketSize cfg.ServerNameHashBucketSize = nameHashBucketSize
} }
serverNameHashMaxSize := nextPowerOf2(serverNameBytes) serverNameHashMaxSize := nextPowerOf2(serverNameBytes)
if cfg.ServerNameHashMaxSize < serverNameHashMaxSize { if cfg.ServerNameHashMaxSize < serverNameHashMaxSize {
glog.V(3).Infof("adjusting ServerNameHashMaxSize variable to %v", serverNameHashMaxSize) glog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %q", serverNameHashMaxSize)
cfg.ServerNameHashMaxSize = serverNameHashMaxSize cfg.ServerNameHashMaxSize = serverNameHashMaxSize
} }
// the limit of open files is per worker process // the limit of open files is per worker process
// and we leave some room to avoid consuming all the FDs available // and we leave some room to avoid consuming all the FDs available
wp, err := strconv.Atoi(cfg.WorkerProcesses) wp, err := strconv.Atoi(cfg.WorkerProcesses)
glog.V(3).Infof("number of worker processes: %v", wp) glog.V(3).Infof("Number of worker processes: %d", wp)
if err != nil { if err != nil {
wp = 1 wp = 1
} }
maxOpenFiles := (sysctlFSFileMax() / wp) - 1024 maxOpenFiles := (sysctlFSFileMax() / wp) - 1024
glog.V(2).Infof("maximum number of open file descriptors : %v", maxOpenFiles) glog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles)
if maxOpenFiles < 1024 { if maxOpenFiles < 1024 {
// this means the value of RLIMIT_NOFILE is too low. // this means the value of RLIMIT_NOFILE is too low.
maxOpenFiles = 1024 maxOpenFiles = 1024
@ -564,7 +528,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.ProxySetHeaders != "" { if cfg.ProxySetHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders) cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders)
if err != nil { if err != nil {
glog.Warningf("unexpected error reading configmap %v: %v", cfg.ProxySetHeaders, err) glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err)
} }
setHeaders = cmap.Data setHeaders = cmap.Data
@ -574,7 +538,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.AddHeaders != "" { if cfg.AddHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.AddHeaders) cmap, err := n.store.GetConfigMap(cfg.AddHeaders)
if err != nil { if err != nil {
glog.Warningf("unexpected error reading configmap %v: %v", cfg.AddHeaders, err) glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err)
} }
addHeaders = cmap.Data addHeaders = cmap.Data
@ -586,7 +550,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
secret, err := n.store.GetSecret(secretName) secret, err := n.store.GetSecret(secretName)
if err != nil { if err != nil {
glog.Warningf("unexpected error reading secret %v: %v", secretName, err) glog.Warningf("Error reading Secret %q from local store: %v", secretName, err)
} }
nsSecName := strings.Replace(secretName, "/", "-", -1) nsSecName := strings.Replace(secretName, "/", "-", -1)
@ -595,7 +559,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if ok { if ok {
pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem) pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem)
if err != nil { if err != nil {
glog.Warningf("unexpected error adding or updating dhparam %v file: %v", nsSecName, err) glog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err)
} else { } else {
sslDHParam = pemFileName sslDHParam = pemFileName
} }
@ -647,31 +611,28 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return err return err
} }
defer tmpfile.Close() defer tmpfile.Close()
err = ioutil.WriteFile(tmpfile.Name(), content, 0644) err = ioutil.WriteFile(tmpfile.Name(), content, file.ReadWriteByUser)
if err != nil { if err != nil {
return err return err
} }
// executing diff can return exit code != 0 // TODO: executing diff can return exit code != 0
diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput() diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput()
glog.Infof("NGINX configuration diff\n") glog.Infof("NGINX configuration diff:\n%v", string(diffOutput))
glog.Infof("%v\n", string(diffOutput))
// Do not use defer to remove the temporal file. // we do not defer the deletion of temp files in order
// This is helpful when there is an error in the // to keep them around for inspection in case of error
// temporal configuration (we can manually inspect the file).
// Only remove the file when no error occurred.
os.Remove(tmpfile.Name()) os.Remove(tmpfile.Name())
} }
} }
err = ioutil.WriteFile(cfgPath, content, 0644) err = ioutil.WriteFile(cfgPath, content, file.ReadWriteByUser)
if err != nil { if err != nil {
return err return err
} }
o, err := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput() o, err := nginxExecCommand("-s", "reload").CombinedOutput()
if err != nil { if err != nil {
return fmt.Errorf("%v\n%v", err, string(o)) return fmt.Errorf("%v\n%v", err, string(o))
} }
@ -679,9 +640,10 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return nil return nil
} }
// nginxHashBucketSize computes the correct nginx hash_bucket_size for a hash with the given longest key // nginxHashBucketSize computes the correct NGINX hash_bucket_size for a hash
// with the given longest key.
func nginxHashBucketSize(longestString int) int { func nginxHashBucketSize(longestString int) int {
// See https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation // see https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation
wordSize := 8 // Assume 64 bit CPU wordSize := 8 // Assume 64 bit CPU
n := longestString + 2 n := longestString + 2
aligned := (n + wordSize - 1) & ^(wordSize - 1) aligned := (n + wordSize - 1) & ^(wordSize - 1)
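For readers unfamiliar with the bit trick in the last line above: it rounds (longestString + 2) up to the next multiple of the CPU word size before the bucket size is derived from it. A minimal standalone sketch of that arithmetic (the helper name and sample values are illustrative, not part of the commit):
// roundUp rounds n up to the next multiple of wordSize (assumed to be a power of two).
func roundUp(n, wordSize int) int {
	return (n + wordSize - 1) & ^(wordSize - 1)
}
// roundUp(61, 8) == 64, roundUp(64, 8) == 64, roundUp(65, 8) == 72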
@ -708,7 +670,7 @@ func (n *NGINXController) setupSSLProxy() {
sslPort := n.cfg.ListenPorts.HTTPS sslPort := n.cfg.ListenPorts.HTTPS
proxyPort := n.cfg.ListenPorts.SSLProxy proxyPort := n.cfg.ListenPorts.SSLProxy
glog.Info("starting TLS proxy for SSL passthrough") glog.Info("Starting TLS proxy for SSL Passthrough")
n.Proxy = &TCPProxy{ n.Proxy = &TCPProxy{
Default: &TCPServer{ Default: &TCPServer{
Hostname: "localhost", Hostname: "localhost",
@ -725,32 +687,33 @@ func (n *NGINXController) setupSSLProxy() {
proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout} proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout}
// start goroutine that accepts tcp connections in port 443 // accept TCP connections on the configured HTTPS port
go func() { go func() {
for { for {
var conn net.Conn var conn net.Conn
var err error var err error
if n.store.GetBackendConfiguration().UseProxyProtocol { if n.store.GetBackendConfiguration().UseProxyProtocol {
// we need to wrap the listener in order to decode // wrap the listener in order to decode Proxy
// proxy protocol before handling the connection // Protocol before handling the connection
conn, err = proxyList.Accept() conn, err = proxyList.Accept()
} else { } else {
conn, err = listener.Accept() conn, err = listener.Accept()
} }
if err != nil { if err != nil {
glog.Warningf("unexpected error accepting tcp connection: %v", err) glog.Warningf("Error accepting TCP connection: %v", err)
continue continue
} }
glog.V(3).Infof("remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) glog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
go n.Proxy.Handle(conn) go n.Proxy.Handle(conn)
} }
}() }()
} }
// IsDynamicConfigurationEnough decides if the new configuration changes can be dynamically applied without reloading // IsDynamicConfigurationEnough returns whether a Configuration can be
// dynamically applied, without reloading the backend.
func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool { func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool {
copyOfRunningConfig := *n.runningConfig copyOfRunningConfig := *n.runningConfig
copyOfPcfg := *pcfg copyOfPcfg := *pcfg
@ -761,8 +724,8 @@ func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configurati
return copyOfRunningConfig.Equal(&copyOfPcfg) return copyOfRunningConfig.Equal(&copyOfPcfg)
} }
// configureDynamically JSON encodes new Backends and POSTs it to an internal HTTP endpoint // configureDynamically encodes new Backends in JSON format and POSTs the
// that is handled by Lua // payload to an internal HTTP endpoint handled by Lua.
func configureDynamically(pcfg *ingress.Configuration, port int) error { func configureDynamically(pcfg *ingress.Configuration, port int) error {
backends := make([]*ingress.Backend, len(pcfg.Backends)) backends := make([]*ingress.Backend, len(pcfg.Backends))
@ -796,7 +759,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error {
return err return err
} }
glog.V(2).Infof("posting backends configuration: %s", buf) glog.V(2).Infof("Posting backends configuration: %s", buf)
url := fmt.Sprintf("http://localhost:%d/configuration/backends", port) url := fmt.Sprintf("http://localhost:%d/configuration/backends", port)
resp, err := http.Post(url, "application/json", bytes.NewReader(buf)) resp, err := http.Post(url, "application/json", bytes.NewReader(buf))
@ -806,7 +769,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error {
defer func() { defer func() {
if err := resp.Body.Close(); err != nil { if err := resp.Body.Close(); err != nil {
glog.Warningf("error while closing response body: \n%v", err) glog.Warningf("Error while closing response body:\n%v", err)
} }
}() }()

View file

@ -1,97 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/ingress-nginx/internal/ingress/controller/metric/collector"
)
const (
ngxStatusPath = "/nginx_status"
ngxVtsPath = "/nginx_status/format/json"
)
func (n *NGINXController) setupMonitor(sm statusModule) {
csm := n.statusModule
if csm != sm {
glog.Infof("changing prometheus collector from %v to %v", csm, sm)
n.stats.stop(csm)
n.stats.start(sm)
n.statusModule = sm
}
}
type statsCollector struct {
process prometheus.Collector
basic collector.Stopable
vts collector.Stopable
namespace string
watchClass string
port int
}
func (s *statsCollector) stop(sm statusModule) {
switch sm {
case defaultStatusModule:
s.basic.Stop()
prometheus.Unregister(s.basic)
case vtsStatusModule:
s.vts.Stop()
prometheus.Unregister(s.vts)
}
}
func (s *statsCollector) start(sm statusModule) {
switch sm {
case defaultStatusModule:
s.basic = collector.NewNginxStatus(s.namespace, s.watchClass, s.port, ngxStatusPath)
prometheus.Register(s.basic)
break
case vtsStatusModule:
s.vts = collector.NewNGINXVTSCollector(s.namespace, s.watchClass, s.port, ngxVtsPath)
prometheus.Register(s.vts)
break
}
}
func newStatsCollector(ns, class, binary string, port int) *statsCollector {
glog.Infof("starting new nginx stats collector for Ingress controller running in namespace %v (class %v)", ns, class)
glog.Infof("collector extracting information from port %v", port)
pc, err := collector.NewNamedProcess(true, collector.BinaryNameMatcher{
Name: "nginx",
Binary: binary,
})
if err != nil {
glog.Fatalf("unexpected error registering nginx collector: %v", err)
}
err = prometheus.Register(pc)
if err != nil {
glog.Fatalf("unexpected error registering nginx collector: %v", err)
}
return &statsCollector{
namespace: ns,
watchClass: class,
process: pc,
port: port,
}
}

View file

@ -479,6 +479,18 @@ func New(checkOCSP bool,
if key == configmap { if key == configmap {
store.setConfig(cm) store.setConfig(cm)
} }
ings := store.listers.IngressAnnotation.List()
for _, ingKey := range ings {
key := k8s.MetaNamespaceKey(ingKey)
ing, err := store.GetIngress(key)
if err != nil {
glog.Errorf("could not find Ingress %v in local store: %v", key, err)
continue
}
store.extractAnnotations(ing)
}
updateCh.In() <- Event{ updateCh.In() <- Event{
Type: ConfigurationEvent, Type: ConfigurationEvent,
Obj: cur, Obj: cur,
@ -494,6 +506,13 @@ func New(checkOCSP bool,
store.informers.ConfigMap.AddEventHandler(cmEventHandler) store.informers.ConfigMap.AddEventHandler(cmEventHandler)
store.informers.Service.AddEventHandler(cache.ResourceEventHandlerFuncs{}) store.informers.Service.AddEventHandler(cache.ResourceEventHandlerFuncs{})
// do not wait for informers to read the configmap configuration
cm, err := client.CoreV1().ConfigMaps(namespace).Get(configmap, metav1.GetOptions{})
if err != nil {
glog.Warningf("Unexpected error reading configuration configmap: %v", err)
}
store.setConfig(cm)
return store return store
} }
@ -699,7 +718,7 @@ func (s *k8sStore) setConfig(cmap *corev1.ConfigMap) {
glog.Warningf("unexpected error decoding key ssl-session-ticket-key: %v", err) glog.Warningf("unexpected error decoding key ssl-session-ticket-key: %v", err)
s.backendConfig.SSLSessionTicketKey = "" s.backendConfig.SSLSessionTicketKey = ""
} }
ioutil.WriteFile("/etc/nginx/tickets.key", d, 0644) ioutil.WriteFile("/etc/nginx/tickets.key", d, file.ReadWriteByUser)
} }
} }

View file

@ -26,7 +26,7 @@ import (
"github.com/paultag/sniff/parser" "github.com/paultag/sniff/parser"
) )
// TCPServer describes a server that works in passthrough mode // TCPServer describes a server that works in passthrough mode.
type TCPServer struct { type TCPServer struct {
Hostname string Hostname string
IP string IP string
@ -34,13 +34,13 @@ type TCPServer struct {
ProxyProtocol bool ProxyProtocol bool
} }
// TCPProxy describes the passthrough servers and a default as catch all // TCPProxy describes the passthrough servers and a default as catch all.
type TCPProxy struct { type TCPProxy struct {
ServerList []*TCPServer ServerList []*TCPServer
Default *TCPServer Default *TCPServer
} }
// Get returns the TCPServer to use // Get returns the TCPServer to use for a given host.
func (p *TCPProxy) Get(host string) *TCPServer { func (p *TCPProxy) Get(host string) *TCPServer {
if p.ServerList == nil { if p.ServerList == nil {
return p.Default return p.Default
@ -63,19 +63,19 @@ func (p *TCPProxy) Handle(conn net.Conn) {
length, err := conn.Read(data) length, err := conn.Read(data)
if err != nil { if err != nil {
glog.V(4).Infof("error reading the first 4k of the connection: %s", err) glog.V(4).Infof("Error reading the first 4k of the connection: %s", err)
return return
} }
proxy := p.Default proxy := p.Default
hostname, err := parser.GetHostname(data[:]) hostname, err := parser.GetHostname(data[:])
if err == nil { if err == nil {
glog.V(4).Infof("parsed hostname from TLS Client Hello: %s", hostname) glog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname)
proxy = p.Get(hostname) proxy = p.Get(hostname)
} }
if proxy == nil { if proxy == nil {
glog.V(4).Infof("there is no configured proxy for SSL connections") glog.V(4).Infof("There is no configured proxy for SSL connections.")
return return
} }
@ -86,7 +86,7 @@ func (p *TCPProxy) Handle(conn net.Conn) {
defer clientConn.Close() defer clientConn.Close()
if proxy.ProxyProtocol { if proxy.ProxyProtocol {
//Write out the proxy-protocol header // write out the Proxy Protocol header
localAddr := conn.LocalAddr().(*net.TCPAddr) localAddr := conn.LocalAddr().(*net.TCPAddr)
remoteAddr := conn.RemoteAddr().(*net.TCPAddr) remoteAddr := conn.RemoteAddr().(*net.TCPAddr)
protocol := "UNKNOWN" protocol := "UNKNOWN"
@ -96,16 +96,16 @@ func (p *TCPProxy) Handle(conn net.Conn) {
protocol = "TCP6" protocol = "TCP6"
} }
proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port) proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port)
glog.V(4).Infof("Writing proxy protocol header - %s", proxyProtocolHeader) glog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader)
_, err = fmt.Fprintf(clientConn, proxyProtocolHeader) _, err = fmt.Fprintf(clientConn, proxyProtocolHeader)
} }
if err != nil { if err != nil {
glog.Errorf("unexpected error writing proxy-protocol header: %s", err) glog.Errorf("Error writing Proxy Protocol header: %s", err)
clientConn.Close() clientConn.Close()
} else { } else {
_, err = clientConn.Write(data[:length]) _, err = clientConn.Write(data[:length])
if err != nil { if err != nil {
glog.Errorf("unexpected error writing first 4k of proxy data: %s", err) glog.Errorf("Error writing the first 4k of proxy data: %s", err)
clientConn.Close() clientConn.Close()
} }
} }

View file

@ -25,6 +25,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"github.com/mitchellh/hashstructure"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
@ -191,6 +192,15 @@ func ReadConfig(src map[string]string) config.Configuration {
glog.Warningf("unexpected error merging defaults: %v", err) glog.Warningf("unexpected error merging defaults: %v", err)
} }
hash, err := hashstructure.Hash(to, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
glog.Warningf("unexpected error obtaining hash: %v", err)
}
to.Checksum = fmt.Sprintf("%v", hash)
return to return to
} }
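For context, hashstructure.Hash walks the struct and returns a stable uint64 over its fields, keyed here by their json tags, which gives ReadConfig a cheap way to detect that the ConfigMap-derived settings changed. A minimal sketch of the same call on a hypothetical struct (demoConfig is illustrative, not the controller's real configuration type):
package main
import (
	"fmt"
	"github.com/mitchellh/hashstructure"
)
// demoConfig stands in for the real configuration struct.
type demoConfig struct {
	WorkerProcesses string `json:"worker-processes"`
	KeepAlive       int    `json:"keep-alive"`
}
func main() {
	cfg := demoConfig{WorkerProcesses: "4", KeepAlive: 75}
	// Hash by json tag names, as ReadConfig does above.
	sum, err := hashstructure.Hash(cfg, &hashstructure.HashOptions{TagName: "json"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("checksum: %v\n", sum) // identical input always yields the identical value
}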

View file

@ -17,11 +17,13 @@ limitations under the License.
package template package template
import ( import (
"fmt"
"reflect" "reflect"
"testing" "testing"
"time" "time"
"github.com/kylelemons/godebug/pretty" "github.com/kylelemons/godebug/pretty"
"github.com/mitchellh/hashstructure"
"k8s.io/ingress-nginx/internal/ingress/controller/config" "k8s.io/ingress-nginx/internal/ingress/controller/config"
) )
@ -88,6 +90,14 @@ func TestMergeConfigMapToStruct(t *testing.T) {
def.NginxStatusIpv6Whitelist = []string{"::1", "2001::/16"} def.NginxStatusIpv6Whitelist = []string{"::1", "2001::/16"}
def.ProxyAddOriginalUriHeader = false def.ProxyAddOriginalUriHeader = false
hash, err := hashstructure.Hash(def, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
t.Fatalf("unexpected error obtaining hash: %v", err)
}
def.Checksum = fmt.Sprintf("%v", hash)
to := ReadConfig(conf) to := ReadConfig(conf)
if diff := pretty.Compare(to, def); diff != "" { if diff := pretty.Compare(to, def); diff != "" {
t.Errorf("unexpected diff: (-got +want)\n%s", diff) t.Errorf("unexpected diff: (-got +want)\n%s", diff)
@ -107,6 +117,14 @@ func TestMergeConfigMapToStruct(t *testing.T) {
} }
def = config.NewDefault() def = config.NewDefault()
hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
t.Fatalf("unexpected error obtaining hash: %v", err)
}
def.Checksum = fmt.Sprintf("%v", hash)
to = ReadConfig(map[string]string{}) to = ReadConfig(map[string]string{})
if diff := pretty.Compare(to, def); diff != "" { if diff := pretty.Compare(to, def); diff != "" {
t.Errorf("unexpected diff: (-got +want)\n%s", diff) t.Errorf("unexpected diff: (-got +want)\n%s", diff)
@ -114,6 +132,15 @@ func TestMergeConfigMapToStruct(t *testing.T) {
def = config.NewDefault() def = config.NewDefault()
def.WhitelistSourceRange = []string{"1.1.1.1/32"} def.WhitelistSourceRange = []string{"1.1.1.1/32"}
hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
t.Fatalf("unexpected error obtaining hash: %v", err)
}
def.Checksum = fmt.Sprintf("%v", hash)
to = ReadConfig(map[string]string{ to = ReadConfig(map[string]string{
"whitelist-source-range": "1.1.1.1/32", "whitelist-source-range": "1.1.1.1/32",
}) })

View file

@ -17,6 +17,8 @@ limitations under the License.
package controller package controller
import ( import (
"os"
"os/exec"
"syscall" "syscall"
"github.com/golang/glog" "github.com/golang/glog"
@ -41,29 +43,53 @@ func newUpstream(name string) *ingress.Backend {
} }
} }
// sysctlSomaxconn returns the value of net.core.somaxconn, i.e. // sysctlSomaxconn returns the maximum number of connections that can be queued
// maximum number of connections that can be queued for acceptance // for acceptance (value of net.core.somaxconn)
// http://nginx.org/en/docs/http/ngx_http_core_module.html#listen // http://nginx.org/en/docs/http/ngx_http_core_module.html#listen
func sysctlSomaxconn() int { func sysctlSomaxconn() int {
maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn") maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn")
if err != nil || maxConns < 512 { if err != nil || maxConns < 512 {
glog.V(3).Infof("system net.core.somaxconn=%v (using system default)", maxConns) glog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns)
return 511 return 511
} }
return maxConns return maxConns
} }
// sysctlFSFileMax returns the value of fs.file-max, i.e. // sysctlFSFileMax returns the maximum number of open file descriptors (value
// maximum number of open file descriptors // of fs.file-max) or 0 in case of error.
func sysctlFSFileMax() int { func sysctlFSFileMax() int {
var rLimit syscall.Rlimit var rLimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err) glog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err)
// returning 0 means don't render the value
return 0 return 0
} }
glog.V(2).Infof("rlimit.max=%v", rLimit.Max) glog.V(2).Infof("rlimit.max=%v", rLimit.Max)
return int(rLimit.Max) return int(rLimit.Max)
} }
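As a rough worked example of how OnUpdate (earlier in this commit) consumes this value, with purely illustrative numbers:
// sysctlFSFileMax() == 1048576 and cfg.WorkerProcesses == "4" would give
// maxOpenFiles = (1048576 / 4) - 1024 = 261120, comfortably above the 1024 floor.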
const (
defBinary = "/usr/sbin/nginx"
cfgPath = "/etc/nginx/nginx.conf"
)
func nginxExecCommand(args ...string) *exec.Cmd {
ngx := os.Getenv("NGINX_BINARY")
if ngx == "" {
ngx = defBinary
}
cmdArgs := []string{"-c", cfgPath}
cmdArgs = append(cmdArgs, args...)
return exec.Command(ngx, cmdArgs...)
}
func nginxTestCommand(cfg string) *exec.Cmd {
ngx := os.Getenv("NGINX_BINARY")
if ngx == "" {
ngx = defBinary
}
return exec.Command(ngx, "-c", cfg, "-t")
}
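These two helpers centralize the NGINX_BINARY lookup that NewNGINXController used to perform itself, so the call sites earlier in this commit reduce to one-liners. A minimal sketch of a caller, assuming it lives in the same controller package and imports fmt (the function name is illustrative):
// reloadNginx mirrors the reload call in OnUpdate: run the configured binary with
// "-c /etc/nginx/nginx.conf -s reload" and surface the combined output on failure.
func reloadNginx() error {
	out, err := nginxExecCommand("-s", "reload").CombinedOutput()
	if err != nil {
		return fmt.Errorf("%v\n%v", err, string(out))
	}
	return nil
}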

View file

@ -0,0 +1,296 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collector
import (
"encoding/json"
"net"
"strings"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
)
type socketData struct {
Host string `json:"host"` // Label
Status string `json:"status"` // Label
RealIPAddress string `json:"realIpAddr"` // Label
RemoteAddress string `json:"remoteAddr"` // Label
RemoteUser string `json:"remoteUser"` // Label
BytesSent float64 `json:"bytesSent"` // Metric
Protocol string `json:"protocol"` // Label
Method string `json:"method"` // Label
URI string `json:"uri"` // Label
RequestLength float64 `json:"requestLength"` // Metric
RequestTime float64 `json:"requestTime"` // Metric
UpstreamName string `json:"upstreamName"` // Label
UpstreamIP string `json:"upstreamIP"` // Label
UpstreamResponseTime float64 `json:"upstreamResponseTime"` // Metric
UpstreamStatus string `json:"upstreamStatus"` // Label
Namespace string `json:"namespace"` // Label
Ingress string `json:"ingress"` // Label
Service string `json:"service"` // Label
}
// SocketCollector stores prometheus metrics and ingress meta-data
type SocketCollector struct {
upstreamResponseTime *prometheus.HistogramVec
requestTime *prometheus.HistogramVec
requestLength *prometheus.HistogramVec
bytesSent *prometheus.HistogramVec
collectorSuccess *prometheus.GaugeVec
collectorSuccessTime *prometheus.GaugeVec
requests *prometheus.CounterVec
listener net.Listener
ns string
ingressClass string
}
// NewInstance creates a new SocketCollector instance
func NewInstance(ns string, class string) error {
sc := SocketCollector{}
ns = strings.Replace(ns, "-", "_", -1)
listener, err := net.Listen("unix", "/tmp/prometheus-nginx.socket")
if err != nil {
return err
}
sc.listener = listener
sc.ns = ns
sc.ingressClass = class
requestTags := []string{"host", "status", "remote_address", "real_ip_address", "remote_user", "protocol", "method", "uri", "upstream_name", "upstream_ip", "upstream_status", "namespace", "ingress", "service"}
collectorTags := []string{"namespace", "ingress_class"}
sc.upstreamResponseTime = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "upstream_response_time_seconds",
Help: "The time spent on receiving the response from the upstream server",
Namespace: ns,
},
requestTags,
)
sc.requestTime = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "The request processing time in seconds",
Namespace: ns,
},
requestTags,
)
sc.requestLength = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_length_bytes",
Help: "The request length (including request line, header, and request body)",
Namespace: ns,
Buckets: prometheus.LinearBuckets(10, 10, 10), // 10 buckets, each 10 bytes wide.
},
requestTags,
)
sc.requests = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "requests",
Help: "The total number of client requests.",
Namespace: ns,
},
collectorTags,
)
sc.bytesSent = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "bytes_sent",
Help: "The the number of bytes sent to a client",
Namespace: ns,
Buckets: prometheus.ExponentialBuckets(10, 10, 7), // 7 buckets, exponential factor of 10.
},
requestTags,
)
sc.collectorSuccess = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "collector_last_run_successful",
Help: "Whether the last collector run was successful (success = 1, failure = 0).",
Namespace: ns,
},
collectorTags,
)
sc.collectorSuccessTime = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "collector_last_run_successful_timestamp_seconds",
Help: "Timestamp of the last successful collector run",
Namespace: ns,
},
collectorTags,
)
prometheus.MustRegister(sc.upstreamResponseTime)
prometheus.MustRegister(sc.requestTime)
prometheus.MustRegister(sc.requestLength)
prometheus.MustRegister(sc.requests)
prometheus.MustRegister(sc.bytesSent)
prometheus.MustRegister(sc.collectorSuccess)
prometheus.MustRegister(sc.collectorSuccessTime)
go sc.Run()
return nil
}
func (sc *SocketCollector) handleMessage(msg []byte) {
glog.V(5).Infof("msg: %v", string(msg))
collectorSuccess := true
// Unmarshal bytes
var stats socketData
err := json.Unmarshal(msg, &stats)
if err != nil {
glog.Errorf("Unexpected error deserializing JSON paylod: %v", err)
collectorSuccess = false
return
}
// Create Request Labels Map
requestLabels := prometheus.Labels{
"host": stats.Host,
"status": stats.Status,
"remote_address": stats.RemoteAddress,
"real_ip_address": stats.RealIPAddress,
"remote_user": stats.RemoteUser,
"protocol": stats.Protocol,
"method": stats.Method,
"uri": stats.URI,
"upstream_name": stats.UpstreamName,
"upstream_ip": stats.UpstreamIP,
"upstream_status": stats.UpstreamStatus,
"namespace": stats.Namespace,
"ingress": stats.Ingress,
"service": stats.Service,
}
// Create Collector Labels Map
collectorLabels := prometheus.Labels{
"namespace": sc.ns,
"ingress_class": sc.ingressClass,
}
// Emit metrics
requestsMetric, err := sc.requests.GetMetricWith(collectorLabels)
if err != nil {
glog.Errorf("Error fetching requests metric: %v", err)
collectorSuccess = false
} else {
requestsMetric.Inc()
}
if stats.UpstreamResponseTime != -1 {
upstreamResponseTimeMetric, err := sc.upstreamResponseTime.GetMetricWith(requestLabels)
if err != nil {
glog.Errorf("Error fetching upstream response time metric: %v", err)
collectorSuccess = false
} else {
upstreamResponseTimeMetric.Observe(stats.UpstreamResponseTime)
}
}
if stats.RequestTime != -1 {
requestTimeMetric, err := sc.requestTime.GetMetricWith(requestLabels)
if err != nil {
glog.Errorf("Error fetching request duration metric: %v", err)
collectorSuccess = false
} else {
requestTimeMetric.Observe(stats.RequestTime)
}
}
if stats.RequestLength != -1 {
requestLengthMetric, err := sc.requestLength.GetMetricWith(requestLabels)
if err != nil {
glog.Errorf("Error fetching request length metric: %v", err)
collectorSuccess = false
} else {
requestLengthMetric.Observe(stats.RequestLength)
}
}
if stats.BytesSent != -1 {
bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels)
if err != nil {
glog.Errorf("Error fetching bytes sent metric: %v", err)
collectorSuccess = false
} else {
bytesSentMetric.Observe(stats.BytesSent)
}
}
collectorSuccessMetric, err := sc.collectorSuccess.GetMetricWith(collectorLabels)
if err != nil {
glog.Errorf("Error fetching collector success metric: %v", err)
} else {
if collectorSuccess {
collectorSuccessMetric.Set(1)
collectorSuccessTimeMetric, err := sc.collectorSuccessTime.GetMetricWith(collectorLabels)
if err != nil {
glog.Errorf("Error fetching collector success time metric: %v", err)
} else {
collectorSuccessTimeMetric.Set(float64(time.Now().Unix()))
}
} else {
collectorSuccessMetric.Set(0)
}
}
}
// Run listens for connections on the unix socket and spawns a goroutine to process the content
func (sc *SocketCollector) Run() {
for {
conn, err := sc.listener.Accept()
if err != nil {
continue
}
go handleMessages(conn, sc.handleMessage)
}
}
const packetSize = 1024 * 65
// handleMessages processes the content received on a network connection
func handleMessages(conn net.Conn, fn func([]byte)) {
defer conn.Close()
msg := make([]byte, packetSize)
s, err := conn.Read(msg[0:])
if err != nil {
return
}
fn(msg[0:s])
}
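The messages read here are produced on the NGINX side and are expected to match the json tags on socketData; numeric fields sent as -1 are treated as absent by handleMessage. A hypothetical payload, with illustrative values only:
// Illustrative only: feed one hand-written payload through an existing collector sc.
msg := []byte(`{"host":"example.com","status":"200","bytesSent":512,"protocol":"HTTP/1.1",
"method":"GET","uri":"/","requestLength":320,"requestTime":0.046,
"upstreamName":"default-echo-80","upstreamIP":"10.2.0.11:8080","upstreamStatus":"200",
"upstreamResponseTime":0.032,"namespace":"default","ingress":"echo","service":"echo"}`)
sc.handleMessage(msg)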

View file

@ -0,0 +1,66 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collector
import (
"fmt"
"net"
"sync/atomic"
"testing"
"time"
)
func TestNewUDPLogListener(t *testing.T) {
var count uint64
fn := func(message []byte) {
t.Logf("message: %v", string(message))
atomic.AddUint64(&count, 1)
}
tmpFile := fmt.Sprintf("/tmp/test-socket-%v", time.Now().Nanosecond())
l, err := net.Listen("unix", tmpFile)
if err != nil {
t.Fatalf("unexpected error creating unix socket: %v", err)
}
if l == nil {
t.Fatalf("expected a listener but none returned")
}
defer l.Close()
go func() {
for {
conn, err := l.Accept()
if err != nil {
continue
}
go handleMessages(conn, fn)
}
}()
conn, _ := net.Dial("unix", tmpFile)
conn.Write([]byte("message"))
conn.Close()
time.Sleep(1 * time.Millisecond)
if count != 1 {
t.Errorf("expected only one message from the UDP listern but %v returned", count)
}
}

View file

@ -17,16 +17,30 @@ limitations under the License.
package collector package collector
import ( import (
"fmt"
"io/ioutil"
"net/http"
"regexp"
"strconv"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
var (
ac = regexp.MustCompile(`Active connections: (\d+)`)
sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`)
reading = regexp.MustCompile(`Reading: (\d+)`)
writing = regexp.MustCompile(`Writing: (\d+)`)
waiting = regexp.MustCompile(`Waiting: (\d+)`)
)
type ( type (
nginxStatusCollector struct { nginxStatusCollector struct {
scrapeChan chan scrapeRequest scrapeChan chan scrapeRequest
ngxHealthPort int ngxHealthPort int
ngxVtsPath string ngxStatusPath string
data *nginxStatusData data *nginxStatusData
watchNamespace string watchNamespace string
ingressClass string ingressClass string
@ -37,15 +51,33 @@ type (
requestsTotal *prometheus.Desc requestsTotal *prometheus.Desc
connections *prometheus.Desc connections *prometheus.Desc
} }
basicStatus struct {
// Active total number of active connections
Active int
// Accepted total number of accepted client connections
Accepted int
// Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit).
Handled int
// Requests total number of client requests.
Requests int
// Reading current number of connections where nginx is reading the request header.
Reading int
// Writing current number of connections where nginx is writing the response back to the client.
Writing int
// Waiting current number of idle client connections waiting for a request.
Waiting int
}
) )
// NewNginxStatus returns a new prometheus collector for the default nginx status module // InitNGINXStatusCollector returns a new prometheus collector for the default nginx status module
func NewNginxStatus(watchNamespace, ingressClass string, ngxHealthPort int, ngxVtsPath string) Stopable { func InitNGINXStatusCollector(watchNamespace, ingressClass string, ngxHealthPort int) error {
const ns string = "nginx"
const ngxStatusPath = "/nginx_status"
p := nginxStatusCollector{ p := nginxStatusCollector{
scrapeChan: make(chan scrapeRequest), scrapeChan: make(chan scrapeRequest),
ngxHealthPort: ngxHealthPort, ngxHealthPort: ngxHealthPort,
ngxVtsPath: ngxVtsPath, ngxStatusPath: ngxStatusPath,
watchNamespace: watchNamespace, watchNamespace: watchNamespace,
ingressClass: ingressClass, ingressClass: ingressClass,
} }
@ -62,14 +94,20 @@ func NewNginxStatus(watchNamespace, ingressClass string, ngxHealthPort int, ngxV
[]string{"ingress_class", "namespace"}, nil), []string{"ingress_class", "namespace"}, nil),
connections: prometheus.NewDesc( connections: prometheus.NewDesc(
prometheus.BuildFQName(ns, "", "connnections"), prometheus.BuildFQName(ns, "", "connections"),
"current number of client connections with state {reading, writing, waiting}", "current number of client connections with state {reading, writing, waiting}",
[]string{"ingress_class", "namespace", "state"}, nil), []string{"ingress_class", "namespace", "state"}, nil),
} }
go p.start() err := prometheus.Register(p)
return p if err != nil {
return fmt.Errorf("error while registering nginx status collector : %v", err)
}
go p.Run()
return nil
} }
// Describe implements prometheus.Collector. // Describe implements prometheus.Collector.
@ -86,7 +124,7 @@ func (p nginxStatusCollector) Collect(ch chan<- prometheus.Metric) {
<-req.done <-req.done
} }
func (p nginxStatusCollector) start() { func (p nginxStatusCollector) Run() {
for req := range p.scrapeChan { for req := range p.scrapeChan {
ch := req.results ch := req.results
p.scrape(ch) p.scrape(ch)
@ -98,9 +136,71 @@ func (p nginxStatusCollector) Stop() {
close(p.scrapeChan) close(p.scrapeChan)
} }
func httpBody(url string) ([]byte, error) {
resp, err := http.DefaultClient.Get(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx : %v", err)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err)
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode)
}
return data, nil
}
func toInt(data []string, pos int) int {
if len(data) == 0 {
return 0
}
if pos >= len(data) {
return 0
}
if v, err := strconv.Atoi(data[pos]); err == nil {
return v
}
return 0
}
func parse(data string) *basicStatus {
acr := ac.FindStringSubmatch(data)
sahrr := sahr.FindStringSubmatch(data)
readingr := reading.FindStringSubmatch(data)
writingr := writing.FindStringSubmatch(data)
waitingr := waiting.FindStringSubmatch(data)
return &basicStatus{
toInt(acr, 1),
toInt(sahrr, 1),
toInt(sahrr, 2),
toInt(sahrr, 3),
toInt(readingr, 1),
toInt(writingr, 1),
toInt(waitingr, 1),
}
}
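To make the regular expressions above concrete: the stub_status page scraped by getNginxStatus has a fixed plain-text layout, and parse pulls one counter out of each match. With an illustrative sample (the numbers are made up):
// Illustrative stub_status output:
const sample = "Active connections: 291\n" +
	"server accepts handled requests\n" +
	" 16630948 16630948 31070465\n" +
	"Reading: 6 Writing: 179 Waiting: 106\n"
// parse(sample) would yield:
// &basicStatus{Active: 291, Accepted: 16630948, Handled: 16630948,
//              Requests: 31070465, Reading: 6, Writing: 179, Waiting: 106}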
func getNginxStatus(port int, path string) (*basicStatus, error) {
url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path)
glog.V(3).Infof("start scraping url: %v", url)
data, err := httpBody(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
}
return parse(string(data)), nil
}
// nginxStatusCollector scrape the nginx status // nginxStatusCollector scrape the nginx status
func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) { func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) {
s, err := getNginxStatus(p.ngxHealthPort, p.ngxVtsPath) s, err := getNginxStatus(p.ngxHealthPort, p.ngxStatusPath)
if err != nil { if err != nil {
glog.Warningf("unexpected error obtaining nginx status info: %v", err) glog.Warningf("unexpected error obtaining nginx status info: %v", err)
return return

View file

@ -26,6 +26,17 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
type scrapeRequest struct {
results chan<- prometheus.Metric
done chan struct{}
}
// Stopable defines a prometheus collector that can be stopped
type Stopable interface {
prometheus.Collector
Stop()
}
// BinaryNameMatcher ... // BinaryNameMatcher ...
type BinaryNameMatcher struct { type BinaryNameMatcher struct {
Name string Name string
@ -60,8 +71,8 @@ type namedProcess struct {
data namedProcessData data namedProcessData
} }
// NewNamedProcess returns a new prometheus collector for the nginx process // newNamedProcess returns a new prometheus collector for the nginx process
func NewNamedProcess(children bool, mn common.MatchNamer) (prometheus.Collector, error) { func newNamedProcess(children bool, mn common.MatchNamer) (prometheus.Collector, error) {
fs, err := proc.NewFS("/proc") fs, err := proc.NewFS("/proc")
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -32,7 +32,6 @@ import (
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection"
@ -183,11 +182,6 @@ func NewStatusSyncer(config Config) Sync {
OnStartedLeading: func(stop <-chan struct{}) { OnStartedLeading: func(stop <-chan struct{}) {
glog.V(2).Infof("I am the new status update leader") glog.V(2).Infof("I am the new status update leader")
go st.syncQueue.Run(time.Second, stop) go st.syncQueue.Run(time.Second, stop)
wait.PollUntil(updateInterval, func() (bool, error) {
// send a dummy object to the queue to force a sync
st.syncQueue.Enqueue("sync status")
return false, nil
}, stop)
}, },
OnStoppedLeading: func() { OnStoppedLeading: func() {
glog.V(2).Infof("I am not status update leader anymore") glog.V(2).Infof("I am not status update leader anymore")

View file

@ -63,6 +63,9 @@ type Configuration struct {
// It contains information about the associated Server Name Indication (SNI). // It contains information about the associated Server Name Indication (SNI).
// +optional // +optional
PassthroughBackends []*SSLPassthroughBackend `json:"passthroughBackends,omitempty"` PassthroughBackends []*SSLPassthroughBackend `json:"passthroughBackends,omitempty"`
// ConfigurationChecksum contains the checksum of the Configuration object
ConfigurationChecksum string `json:"configurationChecksum,omitempty"`
} }
// Backend describes one or more remote server/s (endpoints) associated with a service // Backend describes one or more remote server/s (endpoints) associated with a service
@ -230,10 +233,6 @@ type Location struct {
// UsePortInRedirects indicates if redirects must specify the port // UsePortInRedirects indicates if redirects must specify the port
// +optional // +optional
UsePortInRedirects bool `json:"usePortInRedirects"` UsePortInRedirects bool `json:"usePortInRedirects"`
// VtsFilterKey contains the vts filter key on the location level
// https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key
// +optional
VtsFilterKey string `json:"vtsFilterKey,omitempty"`
// ConfigurationSnippet contains additional configuration for the backend // ConfigurationSnippet contains additional configuration for the backend
// to be considered in the configuration of the location // to be considered in the configuration of the location
ConfigurationSnippet string `json:"configurationSnippet"` ConfigurationSnippet string `json:"configurationSnippet"`

View file

@ -104,6 +104,10 @@ func (c1 *Configuration) Equal(c2 *Configuration) bool {
} }
} }
if c1.ConfigurationChecksum != c2.ConfigurationChecksum {
return false
}
return true return true
} }
@ -256,28 +260,34 @@ func (s1 *Server) Equal(s2 *Server) bool {
if s1.Hostname != s2.Hostname { if s1.Hostname != s2.Hostname {
return false return false
} }
if s1.Alias != s2.Alias {
return false
}
if s1.SSLPassthrough != s2.SSLPassthrough { if s1.SSLPassthrough != s2.SSLPassthrough {
return false return false
} }
if !(&s1.SSLCert).Equal(&s2.SSLCert) { if !(&s1.SSLCert).Equal(&s2.SSLCert) {
return false return false
} }
if !(&s1.CertificateAuth).Equal(&s2.CertificateAuth) { if s1.Alias != s2.Alias {
return false return false
} }
if s1.RedirectFromToWWW != s2.RedirectFromToWWW { if s1.RedirectFromToWWW != s2.RedirectFromToWWW {
return false return false
} }
if !(&s1.CertificateAuth).Equal(&s2.CertificateAuth) {
if len(s1.Locations) != len(s2.Locations) { return false
}
if s1.ServerSnippet != s2.ServerSnippet {
return false return false
} }
if s1.SSLCiphers != s2.SSLCiphers { if s1.SSLCiphers != s2.SSLCiphers {
return false return false
} }
if s1.AuthTLSError != s2.AuthTLSError {
return false
}
if len(s1.Locations) != len(s2.Locations) {
return false
}
// Location are sorted // Location are sorted
for idx, s1l := range s1.Locations { for idx, s1l := range s1.Locations {

View file

@ -21,6 +21,8 @@ import (
"net" "net"
"os" "os"
"testing" "testing"
"k8s.io/ingress-nginx/internal/file"
) )
func TestGetDNSServers(t *testing.T) { func TestGetDNSServers(t *testing.T) {
@ -32,22 +34,22 @@ func TestGetDNSServers(t *testing.T) {
t.Error("expected at least 1 nameserver in /etc/resolv.conf") t.Error("expected at least 1 nameserver in /etc/resolv.conf")
} }
file, err := ioutil.TempFile("", "fw") f, err := ioutil.TempFile("", "fw")
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
defer file.Close() defer f.Close()
defer os.Remove(file.Name()) defer os.Remove(f.Name())
ioutil.WriteFile(file.Name(), []byte(` ioutil.WriteFile(f.Name(), []byte(`
# comment # comment
; comment ; comment
nameserver 2001:4860:4860::8844 nameserver 2001:4860:4860::8844
nameserver 2001:4860:4860::8888 nameserver 2001:4860:4860::8888
nameserver 8.8.8.8 nameserver 8.8.8.8
`), 0644) `), file.ReadWriteByUser)
defResolvConf = file.Name() defResolvConf = f.Name()
s, err = GetSystemNameServers() s, err = GetSystemNameServers()
if err != nil { if err != nil {
t.Fatalf("unexpected error reading /etc/resolv.conf file: %v", err) t.Fatalf("unexpected error reading /etc/resolv.conf file: %v", err)

View file

@ -22,6 +22,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
@ -50,23 +51,39 @@ type Queue struct {
// Element represents one item of the queue // Element represents one item of the queue
type Element struct { type Element struct {
Key interface{} Key interface{}
Timestamp int64 Timestamp int64
IsSkippable bool
} }
// Run ... // Run starts processing elements in the queue
func (t *Queue) Run(period time.Duration, stopCh <-chan struct{}) { func (t *Queue) Run(period time.Duration, stopCh <-chan struct{}) {
wait.Until(t.worker, period, stopCh) wait.Until(t.worker, period, stopCh)
} }
// Enqueue enqueues ns/name of the given api object in the task queue. // EnqueueTask enqueues ns/name of the given api object in the task queue.
func (t *Queue) Enqueue(obj interface{}) { func (t *Queue) EnqueueTask(obj interface{}) {
t.enqueue(obj, false)
}
// EnqueueSkippableTask enqueues the ns/name of the given api object in
// the task queue as a task that can be skipped
func (t *Queue) EnqueueSkippableTask(obj interface{}) {
t.enqueue(obj, true)
}
// enqueue enqueues ns/name of the given api object in the task queue.
func (t *Queue) enqueue(obj interface{}, skippable bool) {
if t.IsShuttingDown() { if t.IsShuttingDown() {
glog.Errorf("queue has been shutdown, failed to enqueue: %v", obj) glog.Errorf("queue has been shutdown, failed to enqueue: %v", obj)
return return
} }
ts := time.Now().UnixNano() ts := time.Now().UnixNano()
if !skippable {
// make sure the timestamp is bigger than lastSync
ts = time.Now().Add(24 * time.Hour).UnixNano()
}
glog.V(3).Infof("queuing item %v", obj) glog.V(3).Infof("queuing item %v", obj)
key, err := t.fn(obj) key, err := t.fn(obj)
if err != nil { if err != nil {
@ -166,3 +183,10 @@ func NewCustomTaskQueue(syncFn func(interface{}) error, fn func(interface{}) (in
return q return q
} }
// GetDummyObject returns a valid object that can be used in the Queue
func GetDummyObject(name string) *metav1.ObjectMeta {
return &metav1.ObjectMeta{
Name: name,
}
}
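The queue now distinguishes skippable from non-skippable work items. A minimal sketch of how this might be used from inside the controller codebase (the plain task.NewTaskQueue constructor is assumed; only NewCustomTaskQueue appears in this excerpt):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/ingress-nginx/internal/task"
)

func main() {
	stopCh := make(chan struct{})

	// assumption: a constructor taking only the sync function exists
	q := task.NewTaskQueue(func(obj interface{}) error {
		fmt.Printf("syncing %v\n", obj)
		return nil
	})
	go q.Run(time.Second, stopCh)

	// Skippable: enqueued with the current timestamp, so it can be skipped
	// when a newer sync has already completed.
	q.EnqueueSkippableTask(task.GetDummyObject("sync-status"))

	// Non-skippable: enqueued with a timestamp 24h in the future, so the
	// lastSync comparison never skips it.
	q.EnqueueTask(task.GetDummyObject("configmap-change"))

	time.Sleep(100 * time.Millisecond)
	close(stopCh)
	q.Shutdown()
}
```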

View file

@ -71,7 +71,7 @@ func TestEnqueueSuccess(t *testing.T) {
k: "testKey", k: "testKey",
v: "testValue", v: "testValue",
} }
q.Enqueue(mo) q.EnqueueSkippableTask(mo)
// wait for 'mockSynFn' // wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10) time.Sleep(time.Millisecond * 10)
if atomic.LoadUint32(&sr) != 1 { if atomic.LoadUint32(&sr) != 1 {
@ -99,7 +99,7 @@ func TestEnqueueFailed(t *testing.T) {
q.Shutdown() q.Shutdown()
// wait for shutdown // wait for shutdown
time.Sleep(time.Millisecond * 10) time.Sleep(time.Millisecond * 10)
q.Enqueue(mo) q.EnqueueSkippableTask(mo)
// wait for 'mockSynFn' // wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10) time.Sleep(time.Millisecond * 10)
// queue is shutdown, so mockSynFn should not be executed, so the result should be 0 // queue is shutdown, so mockSynFn should not be executed, so the result should be 0
@ -121,7 +121,7 @@ func TestEnqueueKeyError(t *testing.T) {
v: "testValue", v: "testValue",
} }
q.Enqueue(mo) q.EnqueueSkippableTask(mo)
// wait for 'mockSynFn' // wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10) time.Sleep(time.Millisecond * 10)
// key error, so the result should be 0 // key error, so the result should be 0
@ -142,16 +142,16 @@ func TestSkipEnqueue(t *testing.T) {
k: "testKey", k: "testKey",
v: "testValue", v: "testValue",
} }
q.Enqueue(mo) q.EnqueueSkippableTask(mo)
q.Enqueue(mo) q.EnqueueSkippableTask(mo)
q.Enqueue(mo) q.EnqueueTask(mo)
q.Enqueue(mo) q.EnqueueSkippableTask(mo)
// run queue // run queue
go q.Run(time.Second, stopCh) go q.Run(time.Second, stopCh)
// wait for 'mockSynFn' // wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10) time.Sleep(time.Millisecond * 10)
if atomic.LoadUint32(&sr) != 1 { if atomic.LoadUint32(&sr) != 2 {
t.Errorf("sr should be 1, but is %d", sr) t.Errorf("sr should be 2, but is %d", sr)
} }
// shutdown queue before exit // shutdown queue before exit

View file

@ -1,30 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
// DummyFileWatcher noop implementation of a file watcher
type DummyFileWatcher struct{}
// NewDummyFileWatcher creates a FileWatcher using the DummyFileWatcher
func NewDummyFileWatcher(file string, onEvent func()) FileWatcher {
return DummyFileWatcher{}
}
// Close ends the watch
func (f DummyFileWatcher) Close() error {
return nil
}

View file

@ -21,6 +21,8 @@ import (
"os" "os"
"testing" "testing"
"time" "time"
"k8s.io/ingress-nginx/internal/file"
) )
func prepareTimeout() chan bool { func prepareTimeout() chan bool {
@ -33,15 +35,15 @@ func prepareTimeout() chan bool {
} }
func TestFileWatcher(t *testing.T) { func TestFileWatcher(t *testing.T) {
file, err := ioutil.TempFile("", "fw") f, err := ioutil.TempFile("", "fw")
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
defer file.Close() defer f.Close()
defer os.Remove(file.Name()) defer os.Remove(f.Name())
count := 0 count := 0
events := make(chan bool, 10) events := make(chan bool, 10)
fw, err := NewFileWatcher(file.Name(), func() { fw, err := NewFileWatcher(f.Name(), func() {
count++ count++
if count != 1 { if count != 1 {
t.Fatalf("expected 1 but returned %v", count) t.Fatalf("expected 1 but returned %v", count)
@ -58,7 +60,7 @@ func TestFileWatcher(t *testing.T) {
t.Fatalf("expected no events before writing a file") t.Fatalf("expected no events before writing a file")
case <-timeoutChan: case <-timeoutChan:
} }
ioutil.WriteFile(file.Name(), []byte{}, 0644) ioutil.WriteFile(f.Name(), []byte{}, file.ReadWriteByUser)
select { select {
case <-events: case <-events:
case <-timeoutChan: case <-timeoutChan:

View file

@ -20,15 +20,37 @@ WORKDIR /etc/nginx
RUN clean-install \ RUN clean-install \
diffutils \ diffutils \
dumb-init dumb-init \
libcap2-bin
COPY . /
RUN setcap cap_net_bind_service=+ep /usr/sbin/nginx \
&& setcap cap_net_bind_service=+ep /nginx-ingress-controller
RUN bash -eux -c ' \
writeDirs=( \
/etc/nginx \
/etc/ingress-controller/ssl \
/etc/ingress-controller/auth \
/var/log \
/var/log/nginx \
/opt/modsecurity/var/log \
/opt/modsecurity/var/upload \
/opt/modsecurity/var/audit \
); \
for dir in "${writeDirs[@]}"; do \
mkdir -p ${dir}; \
chown -R www-data.www-data ${dir}; \
done \
'
# Create symlinks to redirect nginx logs to stdout and stderr docker log collector # Create symlinks to redirect nginx logs to stdout and stderr docker log collector
# This only works if nginx is started with CMD or ENTRYPOINT # This only works if nginx is started with CMD or ENTRYPOINT
RUN mkdir -p /var/log/nginx \ RUN ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log && ln -sf /dev/stderr /var/log/nginx/error.log
COPY . / USER www-data
ENTRYPOINT ["/usr/bin/dumb-init"] ENTRYPOINT ["/usr/bin/dumb-init"]

View file

@ -1,6 +1,7 @@
local balancer_resty = require("balancer.resty") local balancer_resty = require("balancer.resty")
local resty_chash = require("resty.chash") local resty_chash = require("resty.chash")
local util = require("util") local util = require("util")
local split = require("util.split")
local _M = balancer_resty:new({ factory = resty_chash, name = "chash" }) local _M = balancer_resty:new({ factory = resty_chash, name = "chash" })
@ -15,7 +16,7 @@ end
function _M.balance(self) function _M.balance(self)
local key = util.lua_ngx_var(self.hash_by) local key = util.lua_ngx_var(self.hash_by)
local endpoint_string = self.instance:find(key) local endpoint_string = self.instance:find(key)
return util.split_pair(endpoint_string, ":") return split.split_pair(endpoint_string, ":")
end end
return _M return _M

View file

@ -7,6 +7,7 @@
local resty_lock = require("resty.lock") local resty_lock = require("resty.lock")
local util = require("util") local util = require("util")
local split = require("util.split")
local DECAY_TIME = 10 -- this value is in seconds local DECAY_TIME = 10 -- this value is in seconds
local LOCK_KEY = ":ewma_key" local LOCK_KEY = ":ewma_key"
@ -131,10 +132,10 @@ function _M.balance(self)
end end
function _M.after_balance(_) function _M.after_balance(_)
local response_time = tonumber(util.get_first_value(ngx.var.upstream_response_time)) or 0 local response_time = tonumber(split.get_first_value(ngx.var.upstream_response_time)) or 0
local connect_time = tonumber(util.get_first_value(ngx.var.upstream_connect_time)) or 0 local connect_time = tonumber(split.get_first_value(ngx.var.upstream_connect_time)) or 0
local rtt = connect_time + response_time local rtt = connect_time + response_time
local upstream = util.get_first_value(ngx.var.upstream_addr) local upstream = split.get_first_value(ngx.var.upstream_addr)
if util.is_blank(upstream) then if util.is_blank(upstream) then
return return

View file

@ -1,6 +1,7 @@
local balancer_resty = require("balancer.resty") local balancer_resty = require("balancer.resty")
local resty_roundrobin = require("resty.roundrobin") local resty_roundrobin = require("resty.roundrobin")
local util = require("util") local util = require("util")
local split = require("util.split")
local _M = balancer_resty:new({ factory = resty_roundrobin, name = "round_robin" }) local _M = balancer_resty:new({ factory = resty_roundrobin, name = "round_robin" })
@ -14,7 +15,7 @@ end
function _M.balance(self) function _M.balance(self)
local endpoint_string = self.instance:find() local endpoint_string = self.instance:find()
return util.split_pair(endpoint_string, ":") return split.split_pair(endpoint_string, ":")
end end
return _M return _M

View file

@ -1,6 +1,7 @@
local balancer_resty = require("balancer.resty") local balancer_resty = require("balancer.resty")
local resty_chash = require("resty.chash") local resty_chash = require("resty.chash")
local util = require("util") local util = require("util")
local split = require("util.split")
local ck = require("resty.cookie") local ck = require("resty.cookie")
local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" }) local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" })
@ -74,7 +75,7 @@ end
function _M.balance(self) function _M.balance(self)
local endpoint_string = sticky_endpoint_string(self) local endpoint_string = sticky_endpoint_string(self)
return util.split_pair(endpoint_string, ":") return split.split_pair(endpoint_string, ":")
end end
return _M return _M

View file

@ -0,0 +1,46 @@
local socket = ngx.socket.tcp
local cjson = require('cjson')
local defer = require('util.defer')
local assert = assert
local _M = {}
-- send_data writes the JSON-encoded stats to the local unix socket
local function send_data(jsonData)
local s = assert(socket())
assert(s:connect('unix:/tmp/prometheus-nginx.socket'))
assert(s:send(jsonData))
assert(s:close())
end
function _M.encode_nginx_stats()
return cjson.encode({
host = ngx.var.host or "-",
status = ngx.var.status or "-",
remoteAddr = ngx.var.remote_addr or "-",
realIpAddr = ngx.var.realip_remote_addr or "-",
remoteUser = ngx.var.remote_user or "-",
bytesSent = tonumber(ngx.var.bytes_sent) or -1,
protocol = ngx.var.server_protocol or "-",
method = ngx.var.request_method or "-",
uri = ngx.var.uri or "-",
requestLength = tonumber(ngx.var.request_length) or -1,
requestTime = tonumber(ngx.var.request_time) or -1,
upstreamName = ngx.var.proxy_upstream_name or "-",
upstreamIP = ngx.var.upstream_addr or "-",
upstreamResponseTime = tonumber(ngx.var.upstream_response_time) or -1,
upstreamStatus = ngx.var.upstream_status or "-",
namespace = ngx.var.namespace or "-",
ingress = ngx.var.ingress_name or "-",
service = ngx.var.service_name or "-",
})
end
-- call encodes the current request stats and defers the actual socket write
-- to a timer, where the cosocket API is available
function _M.call()
local ok, err = defer.to_timer_phase(send_data, _M.encode_nginx_stats())
if not ok then
ngx.log(ngx.ERR, "failed to defer send_data to timer phase: ", err)
return
end
end
return _M

View file

@ -0,0 +1,20 @@
package.path = "./rootfs/etc/nginx/lua/?.lua;./rootfs/etc/nginx/lua/test/mocks/?.lua;" .. package.path
_G._TEST = true
local defer = require('util.defer')
local _ngx = {
shared = {},
log = function(...) end,
get_phase = function() return "timer" end,
}
_G.ngx = _ngx
describe("Defer", function()
describe("to_timer_phase", function()
it("executes passed callback immediately if called on timer phase", function()
defer.counter = 0
defer.to_timer_phase(function() defer.counter = defer.counter + 1 end)
assert.equal(defer.counter, 1)
end)
end)
end)

View file

@ -0,0 +1,122 @@
package.path = "./rootfs/etc/nginx/lua/?.lua;./rootfs/etc/nginx/lua/test/mocks/?.lua;" .. package.path
_G._TEST = true
local cjson = require('cjson')
local function udp_mock()
return {
setpeername = function(...) return true end,
send = function(payload) return payload end,
close = function(...) return true end
}
end
local _ngx = {
shared = {},
log = function(...) end,
socket = {
udp = udp_mock
},
get_phase = function() return "timer" end,
var = {}
}
_G.ngx = _ngx
describe("Monitor", function()
local monitor = require("monitor")
describe("encode_nginx_stats()", function()
it("successfuly encodes the current stats of nginx to JSON", function()
local nginx_environment = {
host = "testshop.com",
status = "200",
remote_addr = "10.10.10.10",
realip_remote_addr = "5.5.5.5",
remote_user = "admin",
bytes_sent = "150",
server_protocol = "HTTP",
request_method = "GET",
uri = "/admin",
request_length = "300",
request_time = "60",
proxy_upstream_name = "test-upstream",
upstream_addr = "2.2.2.2",
upstream_response_time = "200",
upstream_status = "220",
namespace = "test-app-production",
ingress_name = "web-yml",
service_name = "test-app",
}
ngx.var = nginx_environment
local encode_nginx_stats = monitor.encode_nginx_stats
local encoded_json_stats = encode_nginx_stats()
local decoded_json_stats = cjson.decode(encoded_json_stats)
local expected_json_stats = {
host = "testshop.com",
status = "200",
remoteAddr = "10.10.10.10",
realIpAddr = "5.5.5.5",
remoteUser = "admin",
bytesSent = 150.0,
protocol = "HTTP",
method = "GET",
uri = "/admin",
requestLength = 300.0,
requestTime = 60.0,
upstreamName = "test-upstream",
upstreamIP = "2.2.2.2",
upstreamResponseTime = 200,
upstreamStatus = "220",
namespace = "test-app-production",
ingress = "web-yml",
service = "test-app",
}
assert.are.same(decoded_json_stats,expected_json_stats)
end)
it("replaces empty numeric keys with -1 and missing string keys with -", function()
local nginx_environment = {
remote_addr = "10.10.10.10",
realip_remote_addr = "5.5.5.5",
remote_user = "francisco",
server_protocol = "HTTP",
request_method = "GET",
uri = "/admin",
request_time = "60",
proxy_upstream_name = "test-upstream",
upstream_addr = "2.2.2.2",
upstream_response_time = "200",
upstream_status = "220",
ingress_name = "web-yml",
}
ngx.var = nginx_environment
local encode_nginx_stats = monitor.encode_nginx_stats
local encoded_json_stats = encode_nginx_stats()
local decoded_json_stats = cjson.decode(encoded_json_stats)
local expected_json_stats = {
host = "-",
status = "-",
remoteAddr = "10.10.10.10",
realIpAddr = "5.5.5.5",
remoteUser = "francisco",
bytesSent = -1,
protocol = "HTTP",
method = "GET",
uri = "/admin",
requestLength = -1,
requestTime = 60.0,
upstreamName = "test-upstream",
upstreamIP = "2.2.2.2",
upstreamResponseTime = 200,
upstreamStatus = "220",
namespace = "-",
ingress = "web-yml",
service = "-",
}
assert.are.same(decoded_json_stats,expected_json_stats)
end)
end)
end)

View file

@ -49,17 +49,6 @@ function _M.lua_ngx_var(ngx_var)
return ngx.var[var_name] return ngx.var[var_name]
end end
function _M.split_pair(pair, seperator)
local i = pair:find(seperator)
if i == nil then
return pair, nil
else
local name = pair:sub(1, i - 1)
local value = pair:sub(i + 1, -1)
return name, value
end
end
-- this implementation is taken from -- this implementation is taken from
-- https://web.archive.org/web/20131225070434/http://snippets.luacode.org/snippets/Deep_Comparison_of_Two_Values_3 -- https://web.archive.org/web/20131225070434/http://snippets.luacode.org/snippets/Deep_Comparison_of_Two_Values_3
-- and modified for use in this project -- and modified for use in this project
@ -88,30 +77,6 @@ function _M.is_blank(str)
return str == nil or string_len(str) == 0 return str == nil or string_len(str) == 0
end end
-- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example
-- CAVEAT: nginx is giving out : instead of , so the docs are wrong
-- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr
-- 200 : 200 , ngx.var.upstream_status
-- 0.00 : 0.00, ngx.var.upstream_response_time
function _M.split_upstream_var(var)
if not var then
return nil, nil
end
local t = {}
for v in var:gmatch("[^%s|,]+") do
if v ~= ":" then
t[#t+1] = v
end
end
return t
end
function _M.get_first_value(var)
local t = _M.split_upstream_var(var) or {}
if #t == 0 then return nil end
return t[1]
end
-- this implementation is taken from: -- this implementation is taken from:
-- https://github.com/luafun/luafun/blob/master/fun.lua#L33 -- https://github.com/luafun/luafun/blob/master/fun.lua#L33
-- SHA: 04c99f9c393e54a604adde4b25b794f48104e0d0 -- SHA: 04c99f9c393e54a604adde4b25b794f48104e0d0
@ -130,4 +95,13 @@ local function deepcopy(orig)
end end
_M.deepcopy = deepcopy _M.deepcopy = deepcopy
local function tablelength(T)
local count = 0
for _ in pairs(T) do
count = count + 1
end
return count
end
_M.tablelength = tablelength
return _M return _M

View file

@ -0,0 +1,57 @@
local util = require("util")
local timer_started = false
local queue = {}
local MAX_QUEUE_SIZE = 10000
local _M = {}
local function flush_queue(premature)
-- TODO Investigate if we should actually still flush the queue when we're
-- shutting down.
if premature then return end
local current_queue = queue
queue = {}
timer_started = false
for _,v in ipairs(current_queue) do
v.func(unpack(v.args))
end
end
-- `to_timer_phase` will enqueue a function that will be executed in a timer
-- context, at a later point in time. The purpose is that some APIs (such as
-- sockets) are not available during some nginx request phases (such as the
-- logging phase), but are available for use in timers. There are no ordering
-- guarantees for when a function will be executed.
function _M.to_timer_phase(func, ...)
if ngx.get_phase() == "timer" then
func(...)
return true
end
if #queue >= MAX_QUEUE_SIZE then
ngx.log(ngx.ERR, "deferred timer queue full")
return nil, "deferred timer queue full"
end
table.insert(queue, { func = func, args = {...} })
if not timer_started then
local ok, err = ngx.timer.at(0, flush_queue)
if ok then
-- unfortunately this is needed to deal with tests: when running unit tests, we
-- don't actually run the timer, we call the function inline
if util.tablelength(queue) > 0 then
timer_started = true
end
else
local msg = "failed to create timer: " .. tostring(err)
ngx.log(ngx.ERR, msg)
return nil, msg
end
end
return true
end
return _M

View file

@ -0,0 +1,70 @@
local _M = {}
-- splits strings into host and port
local function parse_addr(addr)
local _, _, host, port = addr:find("([^:]+):([^:]+)")
if host and port then
return {host=host, port=port}
else
return nil, "error in parsing upstream address!"
end
end
function _M.get_first_value(var)
local t = _M.split_upstream_var(var) or {}
if #t == 0 then return nil end
return t[1]
end
function _M.split_pair(pair, seperator)
local i = pair:find(seperator)
if i == nil then
return pair, nil
else
local name = pair:sub(1, i - 1)
local value = pair:sub(i + 1, -1)
return name, value
end
end
-- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example
-- CAVEAT: nginx is giving out : instead of , so the docs are wrong
-- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr
-- 200 : 200 , ngx.var.upstream_status
-- 0.00 : 0.00, ngx.var.upstream_response_time
function _M.split_upstream_var(var)
if not var then
return nil, nil
end
local t = {}
for v in var:gmatch("[^%s|,]+") do
if v ~= ":" then
t[#t+1] = v
end
end
return t
end
-- Splits an NGINX $upstream_addr and returns an array of tables with a `host` and `port` key-value pair.
function _M.split_upstream_addr(addrs_str)
if not addrs_str then
return nil, nil
end
local addrs = _M.split_upstream_var(addrs_str)
local host_and_ports = {}
for _, v in ipairs(addrs) do
local a, err = parse_addr(v)
if err then
return nil, err
end
host_and_ports[#host_and_ports+1] = a
end
if #host_and_ports == 0 then
return nil, "no upstream addresses to parse!"
end
return host_and_ports
end
return _M

View file

@ -1,5 +1,5 @@
# A very simple nginx configuration file that forces nginx to start. # A very simple nginx configuration file that forces nginx to start.
pid /run/nginx.pid; pid /tmp/nginx.pid;
events {} events {}
http {} http {}

View file

@ -7,6 +7,11 @@
{{ $proxyHeaders := .ProxySetHeaders }} {{ $proxyHeaders := .ProxySetHeaders }}
{{ $addHeaders := .AddHeaders }} {{ $addHeaders := .AddHeaders }}
# Configuration checksum: {{ $all.Cfg.Checksum }}
# set up custom paths that do not require root access
pid /tmp/nginx.pid;
{{ if $cfg.EnableModsecurity }} {{ if $cfg.EnableModsecurity }}
load_module /etc/nginx/modules/ngx_http_modsecurity_module.so; load_module /etc/nginx/modules/ngx_http_modsecurity_module.so;
{{ end }} {{ end }}
@ -20,7 +25,6 @@ worker_processes {{ $cfg.WorkerProcesses }};
worker_cpu_affinity {{ $cfg.WorkerCpuAffinity }}; worker_cpu_affinity {{ $cfg.WorkerCpuAffinity }};
{{ end }} {{ end }}
pid /run/nginx.pid;
{{ if ne .MaxOpenFiles 0 }} {{ if ne .MaxOpenFiles 0 }}
worker_rlimit_nofile {{ .MaxOpenFiles }}; worker_rlimit_nofile {{ .MaxOpenFiles }};
{{ end }} {{ end }}
@ -67,6 +71,13 @@ http {
balancer = res balancer = res
end end
{{ end }} {{ end }}
ok, res = pcall(require, "monitor")
if not ok then
error("require failed: " .. tostring(res))
else
monitor = res
end
} }
{{ if $all.DynamicConfigurationEnabled }} {{ if $all.DynamicConfigurationEnabled }}
@ -97,11 +108,6 @@ http {
geoip_proxy_recursive on; geoip_proxy_recursive on;
{{ end }} {{ end }}
{{ if $cfg.EnableVtsStatus }}
vhost_traffic_status_zone shared:vhost_traffic_status:{{ $cfg.VtsStatusZoneSize }};
vhost_traffic_status_filter_by_set_key {{ $cfg.VtsDefaultFilterKey }};
{{ end }}
aio threads; aio threads;
aio_write on; aio_write on;
@ -115,6 +121,10 @@ http {
keepalive_timeout {{ $cfg.KeepAlive }}s; keepalive_timeout {{ $cfg.KeepAlive }}s;
keepalive_requests {{ $cfg.KeepAliveRequests }}; keepalive_requests {{ $cfg.KeepAliveRequests }};
client_body_temp_path /tmp/client-body;
fastcgi_temp_path /tmp/fastcgi-temp;
proxy_temp_path /tmp/proxy-temp;
client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }}; client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }};
client_header_timeout {{ $cfg.ClientHeaderTimeout }}s; client_header_timeout {{ $cfg.ClientHeaderTimeout }}s;
large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }}; large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }};
@ -182,6 +192,7 @@ http {
# $namespace # $namespace
# $ingress_name # $ingress_name
# $service_name # $service_name
# $service_port
log_format upstreaminfo {{ if $cfg.LogFormatEscapeJSON }}escape=json {{ end }}'{{ buildLogFormatUpstream $cfg }}'; log_format upstreaminfo {{ if $cfg.LogFormatEscapeJSON }}escape=json {{ end }}'{{ buildLogFormatUpstream $cfg }}';
{{/* map urls that should not appear in access.log */}} {{/* map urls that should not appear in access.log */}}
@ -360,7 +371,7 @@ http {
{{ range $name, $upstream := $backends }} {{ range $name, $upstream := $backends }}
{{ if eq $upstream.SessionAffinity.AffinityType "cookie" }} {{ if eq $upstream.SessionAffinity.AffinityType "cookie" }}
upstream sticky-{{ $upstream.Name }} { upstream sticky-{{ $upstream.Name }} {
sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }} httponly; sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }}{{if eq (len $upstream.SessionAffinity.CookieSessionAffinity.Locations) 1 }}{{ range $locationName, $locationPaths := $upstream.SessionAffinity.CookieSessionAffinity.Locations }}{{ if eq (len $locationPaths) 1 }} path={{ index $locationPaths 0 }}{{ end }}{{ end }}{{ end }} httponly;
{{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }}
keepalive {{ $cfg.UpstreamKeepaliveConnections }}; keepalive {{ $cfg.UpstreamKeepaliveConnections }};
@ -529,14 +540,8 @@ http {
opentracing off; opentracing off;
{{ end }} {{ end }}
{{ if $cfg.EnableVtsStatus }}
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
vhost_traffic_status_display_sum_key {{ $cfg.VtsSumKey }};
{{ else }}
access_log off; access_log off;
stub_status on; stub_status on;
{{ end }}
} }
{{ if $all.DynamicConfigurationEnabled }} {{ if $all.DynamicConfigurationEnabled }}
@ -593,7 +598,7 @@ stream {
{{ range $i, $tcpServer := .TCPBackends }} {{ range $i, $tcpServer := .TCPBackends }}
upstream tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} { upstream tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} {
{{ range $j, $endpoint := $tcpServer.Endpoints }} {{ range $j, $endpoint := $tcpServer.Endpoints }}
server {{ $endpoint.Address }}:{{ $endpoint.Port }}; server {{ $endpoint.Address | formatIP }}:{{ $endpoint.Port }};
{{ end }} {{ end }}
} }
server { server {
@ -622,7 +627,7 @@ stream {
{{ range $i, $udpServer := .UDPBackends }} {{ range $i, $udpServer := .UDPBackends }}
upstream udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }} { upstream udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }} {
{{ range $j, $endpoint := $udpServer.Endpoints }} {{ range $j, $endpoint := $udpServer.Endpoints }}
server {{ $endpoint.Address }}:{{ $endpoint.Port }}; server {{ $endpoint.Address | formatIP }}:{{ $endpoint.Port }};
{{ end }} {{ end }}
} }
@ -663,6 +668,7 @@ stream {
proxy_set_header X-Namespace $namespace; proxy_set_header X-Namespace $namespace;
proxy_set_header X-Ingress-Name $ingress_name; proxy_set_header X-Ingress-Name $ingress_name;
proxy_set_header X-Service-Name $service_name; proxy_set_header X-Service-Name $service_name;
proxy_set_header X-Service-Port $service_port;
rewrite (.*) / break; rewrite (.*) / break;
@ -833,6 +839,13 @@ stream {
{{ end }} {{ end }}
location {{ $path }} { location {{ $path }} {
{{ $ing := (getIngressInformation $location.Ingress $location.Path) }}
set $namespace "{{ $ing.Namespace }}";
set $ingress_name "{{ $ing.Rule }}";
set $service_name "{{ $ing.Service }}";
set $service_port "{{ $location.Port }}";
set $location_path "{{ $location.Path }}";
{{ if not $all.DisableLua }} {{ if not $all.DisableLua }}
rewrite_by_lua_block { rewrite_by_lua_block {
{{ if $all.DynamicConfigurationEnabled}} {{ if $all.DynamicConfigurationEnabled}}
@ -888,6 +901,8 @@ stream {
{{ if $all.DynamicConfigurationEnabled}} {{ if $all.DynamicConfigurationEnabled}}
balancer.log() balancer.log()
{{ end }} {{ end }}
monitor.call()
} }
{{ end }} {{ end }}
@ -908,16 +923,8 @@ stream {
port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }};
{{ if $all.Cfg.EnableVtsStatus }}{{ if $location.VtsFilterKey }} vhost_traffic_status_filter_by_set_key {{ $location.VtsFilterKey }};{{ end }}{{ end }}
set $proxy_upstream_name "{{ buildUpstreamName $server.Hostname $all.Backends $location $all.DynamicConfigurationEnabled }}"; set $proxy_upstream_name "{{ buildUpstreamName $server.Hostname $all.Backends $location $all.DynamicConfigurationEnabled }}";
{{ $ing := (getIngressInformation $location.Ingress $location.Path) }}
{{/* $ing.Metadata contains the Ingress metadata */}}
set $namespace "{{ $ing.Namespace }}";
set $ingress_name "{{ $ing.Rule }}";
set $service_name "{{ $ing.Service }}";
{{/* redirect to HTTPS can be achieved forcing the redirect or having a SSL Certificate configured for the server */}} {{/* redirect to HTTPS can be achieved forcing the redirect or having a SSL Certificate configured for the server */}}
{{ if (or $location.Rewrite.ForceSSLRedirect (and (not (empty $server.SSLCert.PemFileName)) $location.Rewrite.SSLRedirect)) }} {{ if (or $location.Rewrite.ForceSSLRedirect (and (not (empty $server.SSLCert.PemFileName)) $location.Rewrite.SSLRedirect)) }}
{{ if not (isLocationInLocationList $location $all.Cfg.NoTLSRedirectLocations) }} {{ if not (isLocationInLocationList $location $all.Cfg.NoTLSRedirectLocations) }}
@ -1091,6 +1098,7 @@ stream {
proxy_set_header X-Namespace $namespace; proxy_set_header X-Namespace $namespace;
proxy_set_header X-Ingress-Name $ingress_name; proxy_set_header X-Ingress-Name $ingress_name;
proxy_set_header X-Service-Name $service_name; proxy_set_header X-Service-Name $service_name;
proxy_set_header X-Service-Port $service_port;
{{ end }} {{ end }}
{{ if not (empty $location.Backend) }} {{ if not (empty $location.Backend) }}

View file

@ -21,7 +21,6 @@
"bodySize": "1m", "bodySize": "1m",
"enableDynamicTlsRecords": true, "enableDynamicTlsRecords": true,
"enableSpdy": false, "enableSpdy": false,
"enableVtsStatus": true,
"errorLogLevel": "notice", "errorLogLevel": "notice",
"gzipTypes": "application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component", "gzipTypes": "application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component",
"hsts": true, "hsts": true,
@ -46,7 +45,6 @@
"useGzip": true, "useGzip": true,
"useHttp2": true, "useHttp2": true,
"proxyStreamTimeout": "600s", "proxyStreamTimeout": "600s",
"vtsStatusZoneSize": "10m",
"workerProcesses": 1, "workerProcesses": 1,
"limitConnZoneVariable": "$the_real_ip" "limitConnZoneVariable": "$the_real_ip"
}, },
@ -117,9 +115,7 @@
"keyFilename": "", "keyFilename": "",
"caFilename": "", "caFilename": "",
"pemSha": "" "pemSha": ""
}, }
"vtsDefaultFilterKey": "$uri $server_name"
}, { }, {
"path": "/", "path": "/",
"isDefBackend": true, "isDefBackend": true,

View file

@ -149,4 +149,117 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity", func() {
Expect(body).Should(ContainSubstring(fmt.Sprintf("request_uri=http://%v:8080/something/", host))) Expect(body).Should(ContainSubstring(fmt.Sprintf("request_uri=http://%v:8080/something/", host)))
Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("SERVERID=")) Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("SERVERID="))
}) })
It("should set the path to /something on the generated cookie", func() {
host := "example.com"
ing, err := f.EnsureIngress(&v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: host,
Namespace: f.IngressController.Namespace,
Annotations: map[string]string{
"nginx.ingress.kubernetes.io/affinity": "cookie",
"nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID",
},
},
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Host: host,
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
Path: "/something",
Backend: v1beta1.IngressBackend{
ServiceName: "http-svc",
ServicePort: intstr.FromInt(80),
},
},
},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
Expect(ing).NotTo(BeNil())
err = f.WaitForNginxServer(host,
func(server string) bool {
return strings.Contains(server, "proxy_pass http://sticky-"+f.IngressController.Namespace+"-http-svc-80;")
})
Expect(err).NotTo(HaveOccurred())
resp, _, errs := gorequest.New().
Get(f.IngressController.HTTPURL+"/something").
Set("Host", host).
End()
Expect(len(errs)).Should(BeNumerically("==", 0))
Expect(resp.StatusCode).Should(Equal(http.StatusOK))
Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/something"))
})
It("should set the path to / on the generated cookie if there's more than one rule referring to the same backend", func() {
host := "example.com"
ing, err := f.EnsureIngress(&v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: host,
Namespace: f.IngressController.Namespace,
Annotations: map[string]string{
"nginx.ingress.kubernetes.io/affinity": "cookie",
"nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID",
},
},
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Host: host,
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
Path: "/something",
Backend: v1beta1.IngressBackend{
ServiceName: "http-svc",
ServicePort: intstr.FromInt(80),
},
},
{
Path: "/somewhereelese",
Backend: v1beta1.IngressBackend{
ServiceName: "http-svc",
ServicePort: intstr.FromInt(80),
},
},
},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
Expect(ing).NotTo(BeNil())
err = f.WaitForNginxServer(host,
func(server string) bool {
return strings.Contains(server, "proxy_pass http://sticky-"+f.IngressController.Namespace+"-http-svc-80;")
})
Expect(err).NotTo(HaveOccurred())
resp, _, errs := gorequest.New().
Get(f.IngressController.HTTPURL+"/something").
Set("Host", host).
End()
Expect(len(errs)).Should(BeNumerically("==", 0))
Expect(resp.StatusCode).Should(Equal(http.StatusOK))
Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/;"))
})
}) })

View file

@ -33,6 +33,7 @@ import (
_ "k8s.io/ingress-nginx/test/e2e/annotations" _ "k8s.io/ingress-nginx/test/e2e/annotations"
_ "k8s.io/ingress-nginx/test/e2e/defaultbackend" _ "k8s.io/ingress-nginx/test/e2e/defaultbackend"
_ "k8s.io/ingress-nginx/test/e2e/lua" _ "k8s.io/ingress-nginx/test/e2e/lua"
_ "k8s.io/ingress-nginx/test/e2e/servicebackend"
_ "k8s.io/ingress-nginx/test/e2e/settings" _ "k8s.io/ingress-nginx/test/e2e/settings"
_ "k8s.io/ingress-nginx/test/e2e/ssl" _ "k8s.io/ingress-nginx/test/e2e/ssl"
) )

View file

@ -36,6 +36,15 @@ import (
"k8s.io/ingress-nginx/test/e2e/framework" "k8s.io/ingress-nginx/test/e2e/framework"
) )
const (
logDynamicConfigSuccess = "Dynamic reconfiguration succeeded"
logDynamicConfigFailure = "Dynamic reconfiguration failed"
logRequireBackendReload = "Configuration changes detected, backend reload required"
logBackendReloadSuccess = "Backend successfully reloaded"
logSkipBackendReload = "Changes handled by the dynamic configuration, skipping backend reload"
logInitialConfigSync = "Initial synchronization of the NGINX configuration"
)
var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
f := framework.NewDefaultFramework("dynamic-configuration") f := framework.NewDefaultFramework("dynamic-configuration")
@ -69,8 +78,8 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
log, err := f.NginxLogs() log, err := f.NginxLogs()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(log).ToNot(ContainSubstring("could not dynamically reconfigure")) Expect(log).ToNot(ContainSubstring(logDynamicConfigFailure))
Expect(log).To(ContainSubstring("first sync of Nginx configuration")) Expect(log).To(ContainSubstring(logDynamicConfigSuccess))
}) })
Context("when only backends change", func() { Context("when only backends change", func() {
@ -94,14 +103,14 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
restOfLogs := log[index:] restOfLogs := log[index:]
By("POSTing new backends to Lua endpoint") By("POSTing new backends to Lua endpoint")
Expect(restOfLogs).To(ContainSubstring("dynamic reconfiguration succeeded")) Expect(restOfLogs).To(ContainSubstring(logDynamicConfigSuccess))
Expect(restOfLogs).ToNot(ContainSubstring("could not dynamically reconfigure")) Expect(restOfLogs).ToNot(ContainSubstring(logDynamicConfigFailure))
By("skipping Nginx reload") By("skipping Nginx reload")
Expect(restOfLogs).ToNot(ContainSubstring("backend reload required")) Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring("ingress backend successfully reloaded")) Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess))
Expect(restOfLogs).To(ContainSubstring("skipping reload")) Expect(restOfLogs).To(ContainSubstring(logSkipBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring("first sync of Nginx configuration")) Expect(restOfLogs).ToNot(ContainSubstring(logInitialConfigSync))
}) })
It("should be able to update endpoints even when the update POST size(request body) > size(client_body_buffer_size)", func() { It("should be able to update endpoints even when the update POST size(request body) > size(client_body_buffer_size)", func() {
@ -164,14 +173,14 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
restOfLogs := log[index:] restOfLogs := log[index:]
By("POSTing new backends to Lua endpoint") By("POSTing new backends to Lua endpoint")
Expect(restOfLogs).To(ContainSubstring("dynamic reconfiguration succeeded")) Expect(restOfLogs).To(ContainSubstring(logDynamicConfigSuccess))
Expect(restOfLogs).ToNot(ContainSubstring("could not dynamically reconfigure")) Expect(restOfLogs).ToNot(ContainSubstring(logDynamicConfigFailure))
By("skipping Nginx reload") By("skipping Nginx reload")
Expect(restOfLogs).ToNot(ContainSubstring("backend reload required")) Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring("ingress backend successfully reloaded")) Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess))
Expect(restOfLogs).To(ContainSubstring("skipping reload")) Expect(restOfLogs).To(ContainSubstring(logSkipBackendReload))
Expect(restOfLogs).ToNot(ContainSubstring("first sync of Nginx configuration")) Expect(restOfLogs).ToNot(ContainSubstring(logInitialConfigSync))
}) })
}) })
@ -208,10 +217,10 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() {
Expect(log).ToNot(BeEmpty()) Expect(log).ToNot(BeEmpty())
By("reloading Nginx") By("reloading Nginx")
Expect(log).To(ContainSubstring("ingress backend successfully reloaded")) Expect(log).To(ContainSubstring(logBackendReloadSuccess))
By("POSTing new backends to Lua endpoint") By("POSTing new backends to Lua endpoint")
Expect(log).To(ContainSubstring("dynamic reconfiguration succeeded")) Expect(log).To(ContainSubstring(logDynamicConfigSuccess))
By("still be proxying requests through Lua balancer") By("still be proxying requests through Lua balancer")
err = f.WaitForNginxServer("foo.com", err = f.WaitForNginxServer("foo.com",

View file

@ -0,0 +1,166 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package servicebackend
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/parnurzeal/gorequest"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/ingress-nginx/test/e2e/framework"
"strings"
)
var _ = framework.IngressNginxDescribe("Service backend - 503", func() {
f := framework.NewDefaultFramework("service-backend")
BeforeEach(func() {
})
AfterEach(func() {
})
It("should return 503 when backend service does not exist", func() {
host := "nonexistent.svc.com"
bi := buildIngressWithNonexistentService(host, f.IngressController.Namespace, "/")
ing, err := f.EnsureIngress(bi)
Expect(err).NotTo(HaveOccurred())
Expect(ing).NotTo(BeNil())
err = f.WaitForNginxServer(host,
func(server string) bool {
return strings.Contains(server, "return 503;")
})
Expect(err).NotTo(HaveOccurred())
resp, _, errs := gorequest.New().
Get(f.IngressController.HTTPURL).
Set("Host", host).
End()
Expect(len(errs)).Should(BeNumerically("==", 0))
Expect(resp.StatusCode).Should(Equal(503))
})
It("should return 503 when all backend service endpoints are unavailable", func() {
host := "unavailable.svc.com"
bi, bs := buildIngressWithUnavailableServiceEndpoints(host, f.IngressController.Namespace, "/")
svc, err := f.EnsureService(bs)
Expect(err).NotTo(HaveOccurred())
Expect(svc).NotTo(BeNil())
ing, err := f.EnsureIngress(bi)
Expect(err).NotTo(HaveOccurred())
Expect(ing).NotTo(BeNil())
err = f.WaitForNginxServer(host,
func(server string) bool {
return strings.Contains(server, "return 503;")
})
Expect(err).NotTo(HaveOccurred())
resp, _, errs := gorequest.New().
Get(f.IngressController.HTTPURL).
Set("Host", host).
End()
Expect(len(errs)).Should(BeNumerically("==", 0))
Expect(resp.StatusCode).Should(Equal(503))
})
})
func buildIngressWithNonexistentService(host, namespace, path string) *v1beta1.Ingress {
backendService := "nonexistent-svc"
return &v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: host,
Namespace: namespace,
},
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Host: host,
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
Path: path,
Backend: v1beta1.IngressBackend{
ServiceName: backendService,
ServicePort: intstr.FromInt(80),
},
},
},
},
},
},
},
},
}
}
func buildIngressWithUnavailableServiceEndpoints(host, namespace, path string) (*v1beta1.Ingress, *corev1.Service) {
backendService := "unavailable-svc"
return &v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: host,
Namespace: namespace,
},
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Host: host,
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
Path: path,
Backend: v1beta1.IngressBackend{
ServiceName: backendService,
ServicePort: intstr.FromInt(80),
},
},
},
},
},
},
},
},
}, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: backendService,
Namespace: namespace,
},
Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{
{
Name: "tcp",
Port: 80,
TargetPort: intstr.FromInt(80),
Protocol: "TCP",
},
},
Selector: map[string]string{
"app": backendService,
},
},
}
}

View file

@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package settings
import (
"regexp"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/ingress-nginx/test/e2e/framework"
)
var _ = framework.IngressNginxDescribe("Configmap change", func() {
f := framework.NewDefaultFramework("configmap-change")
BeforeEach(func() {
err := f.NewEchoDeployment()
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
})
It("should reload after an update in the configuration", func() {
host := "configmap-change"
ing, err := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil))
Expect(err).NotTo(HaveOccurred())
Expect(ing).NotTo(BeNil())
wlKey := "whitelist-source-range"
wlValue := "1.1.1.1"
By("adding a whitelist-source-range")
err = f.UpdateNginxConfigMapData(wlKey, wlValue)
Expect(err).NotTo(HaveOccurred())
checksumRegex := regexp.MustCompile("Configuration checksum:\\s+(\\d+)")
checksum := ""
err = f.WaitForNginxConfiguration(
func(cfg string) bool {
// before returning, extract the current checksum
match := checksumRegex.FindStringSubmatch(cfg)
if len(match) > 0 {
checksum = match[1]
}
return strings.Contains(cfg, "geo $the_real_ip $deny_") &&
strings.Contains(cfg, "1.1.1.1 0")
})
Expect(err).NotTo(HaveOccurred())
Expect(checksum).NotTo(BeEmpty())
By("changing error-log-level")
err = f.UpdateNginxConfigMapData("error-log-level", "debug")
Expect(err).NotTo(HaveOccurred())
newChecksum := ""
err = f.WaitForNginxConfiguration(
func(cfg string) bool {
match := checksumRegex.FindStringSubmatch(cfg)
if len(match) > 0 {
newChecksum = match[1]
}
return strings.Contains(cfg, "error_log /var/log/nginx/error.log debug;")
})
Expect(err).NotTo(HaveOccurred())
Expect(checksum).NotTo(BeEquivalentTo(newChecksum))
})
})

View file

@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export NAMESPACE=$1 export NAMESPACE=$1
@ -22,3 +24,9 @@ echo "deploying NGINX Ingress controller in namespace $NAMESPACE"
sed "s@\${NAMESPACE}@${NAMESPACE}@" $DIR/../manifests/ingress-controller/mandatory.yaml | kubectl apply --namespace=$NAMESPACE -f - sed "s@\${NAMESPACE}@${NAMESPACE}@" $DIR/../manifests/ingress-controller/mandatory.yaml | kubectl apply --namespace=$NAMESPACE -f -
cat $DIR/../manifests/ingress-controller/service-nodeport.yaml | kubectl apply --namespace=$NAMESPACE -f - cat $DIR/../manifests/ingress-controller/service-nodeport.yaml | kubectl apply --namespace=$NAMESPACE -f -
# wait for the deployment and fail if there is an error before starting the execution of any test
kubectl rollout status \
--request-timeout=3m \
--namespace $NAMESPACE \
deployment nginx-ingress-controller

View file

@ -251,6 +251,14 @@ spec:
- --publish-service=$(POD_NAMESPACE)/ingress-nginx - --publish-service=$(POD_NAMESPACE)/ingress-nginx
- --annotations-prefix=nginx.ingress.kubernetes.io - --annotations-prefix=nginx.ingress.kubernetes.io
- --watch-namespace=${NAMESPACE} - --watch-namespace=${NAMESPACE}
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 33
runAsUser: 33
env: env:
- name: POD_NAME - name: POD_NAME
valueFrom: valueFrom:
@ -284,5 +292,3 @@ spec:
periodSeconds: 10 periodSeconds: 10
successThreshold: 1 successThreshold: 1
timeoutSeconds: 1 timeoutSeconds: 1
securityContext:
privileged: true

21
vendor/github.com/mitchellh/hashstructure/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

65
vendor/github.com/mitchellh/hashstructure/README.md generated vendored Normal file
View file

@ -0,0 +1,65 @@
# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
hashstructure is a Go library for creating a unique hash value
for arbitrary values in Go.
This can be used to key values in a hash (for use in a map, set, etc.)
that are complex. Common use cases include comparing two values without
sending data across the network, caching values locally (de-dup), and so on.
## Features
* Hash any arbitrary Go value, including complex types.
* Tag a struct field to ignore it and not affect the hash value.
* Tag a slice type struct field to treat it as a set where ordering
doesn't affect the hash code but the field itself is still taken into
account to create the hash value.
* Optionally specify a custom hash function to optimize for speed, collision
avoidance for your data set, etc.
* Optionally hash the output of `.String()` on structs that implement fmt.Stringer,
allowing effective hashing of time.Time
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/hashstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
A quick code example is shown below:
```go
type ComplexStruct struct {
Name string
Age uint
Metadata map[string]interface{}
}
v := ComplexStruct{
Name: "mitchellh",
Age: 64,
Metadata: map[string]interface{}{
"car": true,
"location": "California",
"siblings": []string{"Bob", "John"},
},
}
hash, err := hashstructure.Hash(v, nil)
if err != nil {
panic(err)
}
fmt.Printf("%d", hash)
// Output:
// 2307517237273902113
```
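A second sketch, in the same abbreviated style, showing the `hash:"ignore"` and `hash:"set"` struct tags documented in the package godoc (the struct and values are made up):

```go
type Order struct {
	ID      string   `hash:"ignore"` // does not influence the hash
	Items   []string `hash:"set"`    // element order does not influence the hash
	Comment string
}

a := Order{ID: "a-1", Items: []string{"x", "y"}, Comment: "hello"}
b := Order{ID: "b-2", Items: []string{"y", "x"}, Comment: "hello"}

ha, _ := hashstructure.Hash(a, nil)
hb, _ := hashstructure.Hash(b, nil)
fmt.Println(ha == hb)
// Output:
// true
```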

View file

@ -0,0 +1,358 @@
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"reflect"
)
// ErrNotStringer is returned when there's an error with hash:"string"
type ErrNotStringer struct {
Field string
}
// Error implements error for ErrNotStringer
func (ens *ErrNotStringer) Error() string {
return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
}
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to FNV.
Hasher hash.Hash64
// TagName is the struct tag to look at when hashing the structure.
// By default this is "hash".
TagName string
// ZeroNil is a flag determining whether a nil pointer should be treated as
// equal to a zero value of the pointed-to type. By default this is false.
ZeroNil bool
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values. The same *HashOptions value cannot be used
// concurrently. None of the values within a *HashOptions struct are
// safe to read/write while hashing is being done.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
// For structs, the hashing can be controlled using tags. For example:
//
// struct {
// Name string
// UUID string `hash:"ignore"`
// }
//
// The available tag values are:
//
// * "ignore" or "-" - The field will be ignored and not affect the hash code.
//
// * "set" - The field will be treated as a set, where ordering doesn't
// affect the hash code. This only works for slices.
//
// * "string" - The field will be hashed as a string, only works when the
// field implements fmt.Stringer
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = fnv.New64()
}
if opts.TagName == "" {
opts.TagName = "hash"
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{
h: opts.Hasher,
tag: opts.TagName,
zeronil: opts.ZeroNil,
}
return w.visit(reflect.ValueOf(v), nil)
}
type walker struct {
h hash.Hash64
tag string
zeronil bool
}
type visitOpts struct {
// Flags are a bitmask of flags to affect behavior of this visit
Flags visitFlag
// Information about the struct containing this field
Struct interface{}
StructField string
}
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
t := reflect.TypeOf(0)
// Loop since these can be wrapped in multiple layers of pointers
// and interfaces.
for {
// If we have an interface, dereference it. We have to do this up
// here because it might be a nil in there and the check below must
// catch that.
if v.Kind() == reflect.Interface {
v = v.Elem()
continue
}
if v.Kind() == reflect.Ptr {
if w.zeronil {
t = v.Type().Elem()
}
v = reflect.Indirect(v)
continue
}
break
}
// If it is nil, treat it like a zero.
if !v.IsValid() {
v = reflect.Zero(t)
}
// Binary writing can use raw ints, we have to convert to
// a sized-int, we'll choose the largest...
switch v.Kind() {
case reflect.Int:
v = reflect.ValueOf(int64(v.Int()))
case reflect.Uint:
v = reflect.ValueOf(uint64(v.Uint()))
case reflect.Bool:
var tmp int8
if v.Bool() {
tmp = 1
}
v = reflect.ValueOf(tmp)
}
k := v.Kind()
// We can shortcut numeric values by directly binary writing them
if k >= reflect.Int && k <= reflect.Complex64 {
// A direct hash calculation
w.h.Reset()
err := binary.Write(w.h, binary.LittleEndian, v.Interface())
return w.h.Sum64(), err
}
switch k {
case reflect.Array:
var h uint64
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
h = hashUpdateOrdered(w.h, h, current)
}
return h, nil
case reflect.Map:
var includeMap IncludableMap
if opts != nil && opts.Struct != nil {
if v, ok := opts.Struct.(IncludableMap); ok {
includeMap = v
}
}
// Build the hash for the map. We do this by XOR-ing all the key
// and value hashes. This makes it deterministic despite ordering.
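// (Added note: XOR is commutative and associative, so iterating the map
// keys in any order produces the same final hash.)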
var h uint64
for _, k := range v.MapKeys() {
v := v.MapIndex(k)
if includeMap != nil {
incl, err := includeMap.HashIncludeMap(
opts.StructField, k.Interface(), v.Interface())
if err != nil {
return 0, err
}
if !incl {
continue
}
}
kh, err := w.visit(k, nil)
if err != nil {
return 0, err
}
vh, err := w.visit(v, nil)
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
return h, nil
case reflect.Struct:
parent := v.Interface()
var include Includable
if impl, ok := parent.(Includable); ok {
include = impl
}
t := v.Type()
h, err := w.visit(reflect.ValueOf(t.Name()), nil)
if err != nil {
return 0, err
}
l := v.NumField()
for i := 0; i < l; i++ {
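// (Added note: the condition below skips only blank ("_") fields on
// struct values that cannot be set; all other fields are visited.)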
if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
var f visitFlag
fieldType := t.Field(i)
if fieldType.PkgPath != "" {
// Unexported
continue
}
tag := fieldType.Tag.Get(w.tag)
if tag == "ignore" || tag == "-" {
// Ignore this field
continue
}
// If the "string" tag is set, hash the value's String() representation.
if tag == "string" {
if impl, ok := innerV.Interface().(fmt.Stringer); ok {
innerV = reflect.ValueOf(impl.String())
} else {
return 0, &ErrNotStringer{
Field: v.Type().Field(i).Name,
}
}
}
// Check if we implement includable and check it
if include != nil {
incl, err := include.HashInclude(fieldType.Name, innerV)
if err != nil {
return 0, err
}
if !incl {
continue
}
}
switch tag {
case "set":
f |= visitFlagSet
}
kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
if err != nil {
return 0, err
}
vh, err := w.visit(innerV, &visitOpts{
Flags: f,
Struct: parent,
StructField: fieldType.Name,
})
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
}
return h, nil
case reflect.Slice:
// We have two behaviors here. If it isn't a set, we visit the elements
// in order. If it is a set, we combine the element hashes so that the
// result is deterministic regardless of element order.
var h uint64
var set bool
if opts != nil {
set = (opts.Flags & visitFlagSet) != 0
}
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
if set {
h = hashUpdateUnordered(h, current)
} else {
h = hashUpdateOrdered(w.h, h, current)
}
}
return h, nil
case reflect.String:
// Directly hash
w.h.Reset()
_, err := w.h.Write([]byte(v.String()))
return w.h.Sum64(), err
default:
return 0, fmt.Errorf("unknown kind to hash: %s", k)
}
}
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
// For ordered updates, use a real hash function
h.Reset()
// We just panic if the binary writes fail, because we are writing
// fixed-size uint64 values, which should never fail.
e1 := binary.Write(h, binary.LittleEndian, a)
e2 := binary.Write(h, binary.LittleEndian, b)
if e1 != nil {
panic(e1)
}
if e2 != nil {
panic(e2)
}
return h.Sum64()
}
func hashUpdateUnordered(a, b uint64) uint64 {
return a ^ b
}
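// (Added note: the ordered form above generally gives different results for
// (a, b) and (b, a), while the unordered XOR form is symmetric; that is why
// map entries and "set"-tagged slices use hashUpdateUnordered.)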
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint
const (
visitFlagInvalid visitFlag = iota
visitFlagSet = iota << 1
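// (Added note: iota is 1 on the line above, so visitFlagSet == 1<<1 == 2.)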
)
15
vendor/github.com/mitchellh/hashstructure/include.go generated vendored Normal file
View file
@@ -0,0 +1,15 @@
package hashstructure
// Includable is an interface that can optionally be implemented by
// a struct. It will be called for each field in the struct to check whether
// it should be included in the hash.
type Includable interface {
HashInclude(field string, v interface{}) (bool, error)
}
// IncludableMap is an interface that can optionally be implemented by
// a struct. It will be called when a map-type field is found to ask the
// struct if the map item should be included in the hash.
type IncludableMap interface {
HashIncludeMap(field string, k, v interface{}) (bool, error)
}
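// A minimal sketch of implementing these interfaces (added for illustration,
// not part of the vendored source; the Deployment type, its fields, and the
// "internal/" prefix are hypothetical):
//
//	type Deployment struct {
//		Name        string
//		Annotations map[string]string
//	}
//
//	// Exclude the Name field from the hash entirely.
//	func (d Deployment) HashInclude(field string, v interface{}) (bool, error) {
//		return field != "Name", nil
//	}
//
//	// Ignore annotation entries whose key starts with "internal/".
//	func (d Deployment) HashIncludeMap(field string, k, v interface{}) (bool, error) {
//		key, ok := k.(string)
//		return !ok || !strings.HasPrefix(key, "internal/"), nil
//	}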