diff --git a/.travis.yml b/.travis.yml index ca1d711f8..038ad2b62 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,7 +18,7 @@ notifications: on_success: never go: - - 1.10.2 + - 1.10.3 go_import_path: k8s.io/ingress-nginx @@ -40,10 +40,12 @@ jobs: script: - sudo luarocks install luacheck - make luacheck - - mkdir --parents $GOPATH/src/golang.org/x - && git clone --depth=1 https://go.googlesource.com/lint $GOPATH/src/golang.org/x/lint - && go get golang.org/x/lint/golint - - go get github.com/vbatts/git-validation + - | + go get -d golang.org/x/lint/golint + cd $GOPATH/src/golang.org/x/tools + git checkout release-branch.go1.10 + go install golang.org/x/lint/golint + cd - - make verify-all - stage: Lua Unit Test before_script: diff --git a/Gopkg.lock b/Gopkg.lock index a51286fc6..66af7405e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -207,6 +207,12 @@ packages = ["."] revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062" +[[projects]] + branch = "master" + name = "github.com/mitchellh/hashstructure" + packages = ["."] + revision = "2bca23e0e452137f789efbc8610126fd8b94f73b" + [[projects]] branch = "master" name = "github.com/mitchellh/mapstructure" @@ -900,6 +906,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "5feeef324f0cbac72e0234d5f649fc7c4233f4e2bb4477e454e047b5461d7569" + inputs-digest = "56ef61f651cca98e6dc7f7d25fd8dec603be3439bf91ba2e19838c5be1cbeea4" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Makefile b/Makefile index 6d42b939f..ee9ccba23 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,7 @@ IMAGE = $(REGISTRY)/$(IMGNAME) MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) # Set default base image dynamically for each arch -BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.49 +BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.52 ifeq ($(ARCH),arm) QEMUARCH=arm diff --git a/OWNERS b/OWNERS index 6aa0e93bf..36ebfe254 100644 --- a/OWNERS +++ b/OWNERS @@ -4,6 +4,8 @@ approvers: - sig-network-leads - ingress-nginx-admins - ingress-nginx-maintainers + - ElvinEfendi + - antoineco reviewers: - aledbf diff --git a/cmd/nginx/flag_test.go b/cmd/nginx/flag_test.go index ca662e455..8215bfd18 100644 --- a/cmd/nginx/flag_test.go +++ b/cmd/nginx/flag_test.go @@ -34,12 +34,12 @@ func resetForTesting(usage func()) { func TestMandatoryFlag(t *testing.T) { _, _, err := parseFlags() if err == nil { - t.Fatalf("expected and error about default backend service") + t.Fatalf("Expected an error about default backend service") } } func TestDefaults(t *testing.T) { - resetForTesting(func() { t.Fatal("bad parse") }) + resetForTesting(func() { t.Fatal("Parsing failed") }) oldArgs := os.Args defer func() { os.Args = oldArgs }() @@ -47,15 +47,15 @@ func TestDefaults(t *testing.T) { showVersion, conf, err := parseFlags() if err != nil { - t.Fatalf("unexpected error parsing default flags: %v", err) + t.Fatalf("Unexpected error parsing default flags: %v", err) } if showVersion { - t.Fatal("expected false but true was returned for flag show-version") + t.Fatal("Expected flag \"show-version\" to be false") } if conf == nil { - t.Fatal("expected a configuration but nil returned") + t.Fatal("Expected a controller Configuration") } } diff --git a/cmd/nginx/flags.go b/cmd/nginx/flags.go index 80a806013..fe7986d86 100644 --- a/cmd/nginx/flags.go +++ b/cmd/nginx/flags.go @@ -21,7 +21,6 @@ import ( "fmt" "os" "runtime" - "time" "github.com/golang/glog" "github.com/spf13/pflag" @@ -39,101 +38,121 @@ func parseFlags() (bool, *controller.Configuration, error) { var ( 
flags = pflag.NewFlagSet("", pflag.ExitOnError) - apiserverHost = flags.String("apiserver-host", "", "The address of the Kubernetes Apiserver "+ - "to connect to in the format of protocol://address:port, e.g., "+ - "http://localhost:8080. If not specified, the assumption is that the binary runs inside a "+ - "Kubernetes cluster and local discovery is attempted.") - kubeConfigFile = flags.String("kubeconfig", "", "Path to kubeconfig file with authorization and master location information.") + apiserverHost = flags.String("apiserver-host", "", + `Address of the Kubernetes API server. +Takes the form "protocol://address:port". If not specified, it is assumed the +program runs inside a Kubernetes cluster and local discovery is attempted.`) + + kubeConfigFile = flags.String("kubeconfig", "", + `Path to a kubeconfig file containing authorization and API server information.`) defaultSvc = flags.String("default-backend-service", "", - `Service used to serve a 404 page for the default backend. Takes the form - namespace/name. The controller uses the first node port of this Service for - the default backend.`) + `Service used to serve HTTP requests not matching any known server name (catch-all). +Takes the form "namespace/name". The controller configures NGINX to forward +requests to the first port of this Service.`) ingressClass = flags.String("ingress-class", "", - `Name of the ingress class to route through this controller.`) + `Name of the ingress class this controller satisfies. +The class of an Ingress object is set using the annotation "kubernetes.io/ingress.class". +All ingress classes are satisfied if this parameter is left empty.`) configMap = flags.String("configmap", "", - `Name of the ConfigMap that contains the custom configuration to use`) + `Name of the ConfigMap containing custom global configurations for the controller.`) publishSvc = flags.String("publish-service", "", - `Service fronting the ingress controllers. Takes the form namespace/name. - The controller will set the endpoint records on the ingress objects to reflect those on the service.`) + `Service fronting the Ingress controller. +Takes the form "namespace/name". When used together with update-status, the +controller mirrors the address of this service's endpoints to the load-balancer +status of all Ingress objects it satisfies.`) tcpConfigMapName = flags.String("tcp-services-configmap", "", - `Name of the ConfigMap that contains the definition of the TCP services to expose. - The key in the map indicates the external port to be used. The value is the name of the - service with the format namespace/serviceName and the port of the service could be a - number of the name of the port. - The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend`) + `Name of the ConfigMap containing the definition of the TCP services to expose. +The key in the map indicates the external port to be used. The value is a +reference to a Service in the form "namespace/name:port", where "port" can +either be a port number or name. TCP ports 80 and 443 are reserved by the +controller for servicing HTTP traffic.`) udpConfigMapName = flags.String("udp-services-configmap", "", - `Name of the ConfigMap that contains the definition of the UDP services to expose. - The key in the map indicates the external port to be used. 
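For illustration, a minimal Go sketch of splitting the `"namespace/name:port"` reference format described in the new `tcp-services-configmap`/`udp-services-configmap` help text; this is an assumption of how such a reference could be parsed, not the controller's own parsing code:

```go
// Hypothetical helper splitting a "namespace/name:port" Service reference.
// Not the controller's actual parser, only an illustration of the format.
package main

import (
	"fmt"
	"strings"
)

func parseServiceRef(ref string) (namespace, name, port string, err error) {
	parts := strings.SplitN(ref, "/", 2)
	if len(parts) != 2 {
		return "", "", "", fmt.Errorf("expected namespace/name:port, got %q", ref)
	}
	namePort := strings.SplitN(parts[1], ":", 2)
	if len(namePort) != 2 {
		return "", "", "", fmt.Errorf("missing port in %q", ref)
	}
	return parts[0], namePort[0], namePort[1], nil
}

func main() {
	ns, name, port, err := parseServiceRef("default/example-go:8080")
	if err != nil {
		panic(err)
	}
	fmt.Println(ns, name, port) // default example-go 8080
}
```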
The value is the name of the - service with the format namespace/serviceName and the port of the service could be a - number of the name of the port.`) + `Name of the ConfigMap containing the definition of the UDP services to expose. +The key in the map indicates the external port to be used. The value is a +reference to a Service in the form "namespace/name:port", where "port" can +either be a port name or number.`) - resyncPeriod = flags.Duration("sync-period", 600*time.Second, - `Relist and confirm cloud resources this often. Default is 10 minutes`) + resyncPeriod = flags.Duration("sync-period", 0, + `Period at which the controller forces the repopulation of its local object stores. Disabled by default.`) watchNamespace = flags.String("watch-namespace", apiv1.NamespaceAll, - `Namespace to watch for Ingress. Default is to watch all namespaces`) + `Namespace the controller watches for updates to Kubernetes objects. +This includes Ingresses, Services and all configuration resources. All +namespaces are watched if this parameter is left empty.`) - profiling = flags.Bool("profiling", true, `Enable profiling via web interface host:port/debug/pprof/`) + profiling = flags.Bool("profiling", true, + `Enable profiling via web interface host:port/debug/pprof/`) - defSSLCertificate = flags.String("default-ssl-certificate", "", `Name of the secret - that contains a SSL certificate to be used as default for a HTTPS catch-all server. - Takes the form /.`) + defSSLCertificate = flags.String("default-ssl-certificate", "", + `Secret containing a SSL certificate to be used by the default HTTPS server (catch-all). +Takes the form "namespace/name".`) - defHealthzURL = flags.String("health-check-path", "/healthz", `Defines - the URL to be used as health check inside in the default server in NGINX.`) + defHealthzURL = flags.String("health-check-path", "/healthz", + `URL path of the health check endpoint. +Configured inside the NGINX status server. All requests received on the port +defined by the healthz-port parameter are forwarded internally to this path.`) - updateStatus = flags.Bool("update-status", true, `Indicates if the - ingress controller should update the Ingress status IP/hostname. Default is true`) + updateStatus = flags.Bool("update-status", true, + `Update the load-balancer status of Ingress objects this controller satisfies. +Requires setting the publish-service parameter to a valid Service reference.`) - electionID = flags.String("election-id", "ingress-controller-leader", `Election id to use for status update.`) + electionID = flags.String("election-id", "ingress-controller-leader", + `Election id to use for Ingress status updates.`) forceIsolation = flags.Bool("force-namespace-isolation", false, - `Force namespace isolation. This flag is required to avoid the reference of secrets or - configmaps located in a different namespace than the specified in the flag --watch-namespace.`) + `Force namespace isolation. +Prevents Ingress objects from referencing Secrets and ConfigMaps located in a +different namespace than their own. May be used together with watch-namespace.`) - updateStatusOnShutdown = flags.Bool("update-status-on-shutdown", true, `Indicates if the - ingress controller should update the Ingress status IP/hostname when the controller - is being stopped. Default is true`) + updateStatusOnShutdown = flags.Bool("update-status-on-shutdown", true, + `Update the load-balancer status of Ingress objects when the controller shuts down. 
+Requires the update-status parameter.`) - sortBackends = flags.Bool("sort-backends", false, `Defines if servers inside NGINX upstream should be sorted`) + sortBackends = flags.Bool("sort-backends", false, + `Sort servers inside NGINX upstreams.`) useNodeInternalIP = flags.Bool("report-node-internal-ip-address", false, - `Defines if the nodes IP address to be returned in the ingress status should be the internal instead of the external IP address`) + `Set the load-balancer status of Ingress objects to internal Node addresses instead of external. +Requires the update-status parameter.`) showVersion = flags.Bool("version", false, - `Shows release information about the NGINX Ingress controller`) + `Show release information about the NGINX Ingress controller and exit.`) - enableSSLPassthrough = flags.Bool("enable-ssl-passthrough", false, `Enable SSL passthrough feature. Default is disabled`) + enableSSLPassthrough = flags.Bool("enable-ssl-passthrough", false, + `Enable SSL Passthrough.`) - httpPort = flags.Int("http-port", 80, `Indicates the port to use for HTTP traffic`) - httpsPort = flags.Int("https-port", 443, `Indicates the port to use for HTTPS traffic`) - statusPort = flags.Int("status-port", 18080, `Indicates the TCP port to use for exposing the nginx status page`) - sslProxyPort = flags.Int("ssl-passtrough-proxy-port", 442, `Default port to use internally for SSL when SSL Passthgough is enabled`) - defServerPort = flags.Int("default-server-port", 8181, `Default port to use for exposing the default server (catch all)`) - healthzPort = flags.Int("healthz-port", 10254, "port for healthz endpoint.") - - annotationsPrefix = flags.String("annotations-prefix", "nginx.ingress.kubernetes.io", `Prefix of the ingress annotations.`) + annotationsPrefix = flags.String("annotations-prefix", "nginx.ingress.kubernetes.io", + `Prefix of the Ingress annotations specific to the NGINX controller.`) enableSSLChainCompletion = flags.Bool("enable-ssl-chain-completion", true, - `Defines if the nginx ingress controller should check the secrets for missing intermediate CA certificates. - If the certificate contain issues chain issues is not possible to enable OCSP. - Default is true.`) + `Autocomplete SSL certificate chains with missing intermediate CA certificates. +A valid certificate chain is required to enable OCSP stapling. Certificates +uploaded to Kubernetes must have the "Authority Information Access" X.509 v3 +extension for this to succeed.`) syncRateLimit = flags.Float32("sync-rate-limit", 0.3, `Define the sync frequency upper limit`) publishStatusAddress = flags.String("publish-status-address", "", - `User customized address to be set in the status of ingress resources. The controller will set the - endpoint records on the ingress using this address.`) + `Customized address to set as the load-balancer status of Ingress objects this controller satisfies. +Requires the update-status parameter.`) dynamicConfigurationEnabled = flags.Bool("enable-dynamic-configuration", false, - `When enabled controller will try to avoid Nginx reloads as much as possible by using Lua. Disabled by default.`) + `Dynamically refresh backends on topology changes instead of reloading NGINX. 
+Feature backed by OpenResty Lua libraries.`) + + httpPort = flags.Int("http-port", 80, `Port to use for servicing HTTP traffic.`) + httpsPort = flags.Int("https-port", 443, `Port to use for servicing HTTPS traffic.`) + statusPort = flags.Int("status-port", 18080, `Port to use for exposing NGINX status pages.`) + sslProxyPort = flags.Int("ssl-passtrough-proxy-port", 442, `Port to use internally for SSL Passthgough.`) + defServerPort = flags.Int("default-server-port", 8181, `Port to use for exposing the default server (catch-all).`) + healthzPort = flags.Int("healthz-port", 10254, "Port to use for the healthz endpoint.") ) flag.Set("logtostderr", "true") @@ -158,10 +177,10 @@ func parseFlags() (bool, *controller.Configuration, error) { } if *ingressClass != "" { - glog.Infof("Watching for ingress class: %s", *ingressClass) + glog.Infof("Watching for Ingress class: %s", *ingressClass) if *ingressClass != class.DefaultClass { - glog.Warningf("only Ingress with class \"%v\" will be processed by this ingress controller", *ingressClass) + glog.Warningf("Only Ingresses with class %q will be processed by this ingress controller", *ingressClass) } class.IngressClass = *ingressClass @@ -191,7 +210,7 @@ func parseFlags() (bool, *controller.Configuration, error) { } if !*enableSSLChainCompletion { - glog.Warningf("Check of SSL certificate chain is disabled (--enable-ssl-chain-completion=false)") + glog.Warningf("SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)") } // LuaJIT is not available on arch s390x and ppc64le @@ -200,7 +219,7 @@ func parseFlags() (bool, *controller.Configuration, error) { disableLua = true if *dynamicConfigurationEnabled { *dynamicConfigurationEnabled = false - glog.Warningf("Disabling dynamic configuration feature (LuaJIT is not available in s390x and ppc64le)") + glog.Warningf("LuaJIT is not available on s390x and ppc64le architectures: disabling dynamic configuration feature.") } } diff --git a/cmd/nginx/main.go b/cmd/nginx/main.go index 446ba239d..e924b55b6 100644 --- a/cmd/nginx/main.go +++ b/cmd/nginx/main.go @@ -39,12 +39,25 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/ingress-nginx/internal/file" + "k8s.io/ingress-nginx/internal/ingress/annotations/class" "k8s.io/ingress-nginx/internal/ingress/controller" + "k8s.io/ingress-nginx/internal/ingress/metric/collector" "k8s.io/ingress-nginx/internal/k8s" "k8s.io/ingress-nginx/internal/net/ssl" "k8s.io/ingress-nginx/version" ) +const ( + // High enough QPS to fit all expected use cases. QPS=0 is not set here, because + // client code is overriding it. + defaultQPS = 1e6 + // High enough Burst to fit all expected use cases. Burst=0 is not set here, because + // client code is overriding it. 
+ defaultBurst = 1e6 + + fakeCertificate = "default-fake-certificate" +) + func main() { rand.Seed(time.Now().UnixNano()) @@ -71,36 +84,33 @@ func main() { handleFatalInitError(err) } - ns, name, err := k8s.ParseNameNS(conf.DefaultService) + defSvcNs, defSvcName, err := k8s.ParseNameNS(conf.DefaultService) if err != nil { glog.Fatal(err) } - _, err = kubeClient.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + _, err = kubeClient.CoreV1().Services(defSvcNs).Get(defSvcName, metav1.GetOptions{}) if err != nil { + // TODO (antoineco): compare with error types from k8s.io/apimachinery/pkg/api/errors if strings.Contains(err.Error(), "cannot get services in the namespace") { - glog.Fatalf("✖ It seems the cluster it is running with Authorization enabled (like RBAC) and there is no permissions for the ingress controller. Please check the configuration") + glog.Fatalf("✖ The cluster seems to be running with a restrictive Authorization mode and the Ingress controller does not have the required permissions to operate normally.") } - glog.Fatalf("no service with name %v found: %v", conf.DefaultService, err) + glog.Fatalf("No service with name %v found: %v", conf.DefaultService, err) } - glog.Infof("validated %v as the default backend", conf.DefaultService) + glog.Infof("Validated %v as the default backend.", conf.DefaultService) if conf.Namespace != "" { _, err = kubeClient.CoreV1().Namespaces().Get(conf.Namespace, metav1.GetOptions{}) if err != nil { - glog.Fatalf("no namespace with name %v found: %v", conf.Namespace, err) + glog.Fatalf("No namespace with name %v found: %v", conf.Namespace, err) } } - if conf.ResyncPeriod.Seconds() < 10 { - glog.Fatalf("resync period (%vs) is too low", conf.ResyncPeriod.Seconds()) - } - // create the default SSL certificate (dummy) defCert, defKey := ssl.GetFakeSSLCert() c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}, fs) if err != nil { - glog.Fatalf("Error generating self signed certificate: %v", err) + glog.Fatalf("Error generating self-signed certificate: %v", err) } conf.FakeCertificatePath = c.PemFileName @@ -117,6 +127,17 @@ func main() { mux := http.NewServeMux() go registerHandlers(conf.EnableProfiling, conf.ListenPorts.Health, ngx, mux) + err = collector.InitNGINXStatusCollector(conf.Namespace, class.IngressClass, conf.ListenPorts.Status) + + if err != nil { + glog.Fatalf("Error creating metric collector: %v", err) + } + + err = collector.NewInstance(conf.Namespace, class.IngressClass) + if err != nil { + glog.Fatalf("Error creating unix socket server: %v", err) + } + ngx.Start() } @@ -130,24 +151,26 @@ func handleSigterm(ngx *controller.NGINXController, exit exiter) { exitCode := 0 if err := ngx.Stop(); err != nil { - glog.Infof("Error during shutdown %v", err) + glog.Infof("Error during shutdown: %v", err) exitCode = 1 } - glog.Infof("Handled quit, awaiting pod deletion") + glog.Infof("Handled quit, awaiting Pod deletion") time.Sleep(10 * time.Second) glog.Infof("Exiting with %v", exitCode) exit(exitCode) } -// createApiserverClient creates new Kubernetes Apiserver client. When kubeconfig or apiserverHost param is empty -// the function assumes that it is running inside a Kubernetes cluster and attempts to -// discover the Apiserver. Otherwise, it connects to the Apiserver specified. -// -// apiserverHost param is in the format of protocol://address:port/pathPrefix, e.g.http://localhost:8001. 
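As a rough sketch of the pattern described around `createApiserverClient` — build a `rest.Config` from the flags, raise the client rate limits, and retry the first discovery call with an exponential backoff — something along these lines would work; names and values here are illustrative, not the exact ones used by the controller:

```go
// Illustrative sketch only: build a Kubernetes REST client, raise QPS/Burst,
// and retry the initial version discovery. Assumes client-go is vendored.
package main

import (
	"log"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func newClient(apiserverHost, kubeConfig string) (*kubernetes.Clientset, error) {
	cfg, err := clientcmd.BuildConfigFromFlags(apiserverHost, kubeConfig)
	if err != nil {
		return nil, err
	}
	cfg.QPS = 1e6   // mirrors defaultQPS
	cfg.Burst = 1e6 // mirrors defaultBurst

	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}

	// The first request may fail while the API server warms up, so retry
	// version discovery with an exponential backoff.
	backoff := wait.Backoff{Steps: 10, Duration: 1 * time.Second, Factor: 1.5, Jitter: 0.1}
	err = wait.ExponentialBackoff(backoff, func() (bool, error) {
		_, verr := client.Discovery().ServerVersion()
		return verr == nil, nil
	})
	if err != nil {
		return nil, err
	}
	return client, nil
}

func main() {
	if _, err := newClient("", ""); err != nil {
		log.Fatal(err)
	}
}
```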
-// kubeConfig location of kubeconfig file -func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes.Clientset, error) { +// createApiserverClient creates a new Kubernetes REST client. apiserverHost is +// the URL of the API server in the format protocol://address:port/pathPrefix, +// kubeConfig is the location of a kubeconfig file. If defined, the kubeconfig +// file is loaded first, the URL of the API server read from the file is then +// optionally overriden by the value of apiserverHost. +// If neither apiserverHost or kubeconfig are passed in, we assume the +// controller runs inside Kubernetes and fallback to the in-cluster config. If +// the in-cluster config is missing or fails, we fallback to the default config. +func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Clientset, error) { cfg, err := clientcmd.BuildConfigFromFlags(apiserverHost, kubeConfig) if err != nil { return nil, err @@ -166,7 +189,7 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes var v *discovery.Info - // In some environments is possible the client cannot connect the API server in the first request + // The client may fail to connect to the API server in the first request. // https://github.com/kubernetes/ingress-nginx/issues/1968 defaultRetry := wait.Backoff{ Steps: 10, @@ -177,7 +200,7 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes var lastErr error retries := 0 - glog.V(2).Info("trying to discover Kubernetes version") + glog.V(2).Info("Trying to discover Kubernetes version") err = wait.ExponentialBackoff(defaultRetry, func() (bool, error) { v, err = client.Discovery().ServerVersion() @@ -186,48 +209,35 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes } lastErr = err - glog.V(2).Infof("unexpected error discovering Kubernetes version (attempt %v): %v", err, retries) + glog.V(2).Infof("Unexpected error discovering Kubernetes version (attempt %v): %v", err, retries) retries++ return false, nil }) - // err is not null only if there was a timeout in the exponential backoff (ErrWaitTimeout) + // err is returned in case of timeout in the exponential backoff (ErrWaitTimeout) if err != nil { return nil, lastErr } // this should not happen, warn the user if retries > 0 { - glog.Warningf("it was required to retry %v times before reaching the API server", retries) + glog.Warningf("Initial connection to the Kubernetes API server was retried %d times.", retries) } - glog.Infof("Running in Kubernetes Cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", + glog.Infof("Running in Kubernetes cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform) return client, nil } -const ( - // High enough QPS to fit all expected use cases. QPS=0 is not set here, because - // client code is overriding it. - defaultQPS = 1e6 - // High enough Burst to fit all expected use cases. Burst=0 is not set here, because - // client code is overriding it. - defaultBurst = 1e6 - - fakeCertificate = "default-fake-certificate" -) - -/** - * Handles fatal init error that prevents server from doing any work. Prints verbose error - * messages and quits the server. - */ +// Handler for fatal init errors. Prints a verbose error message and exits. func handleFatalInitError(err error) { - glog.Fatalf("Error while initializing connection to Kubernetes apiserver. 
"+ - "This most likely means that the cluster is misconfigured (e.g., it has "+ - "invalid apiserver certificates or service accounts configuration). Reason: %s\n"+ + glog.Fatalf("Error while initiating a connection to the Kubernetes API server. "+ + "This could mean the cluster is misconfigured (e.g. it has invalid API server certificates "+ + "or Service Accounts configuration). Reason: %s\n"+ "Refer to the troubleshooting guide for more information: "+ - "https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md", err) + "https://kubernetes.github.io/ingress-nginx/troubleshooting/", + err) } func registerHandlers(enableProfiling bool, port int, ic *controller.NGINXController, mux *http.ServeMux) { @@ -248,7 +258,7 @@ func registerHandlers(enableProfiling bool, port int, ic *controller.NGINXContro mux.HandleFunc("/stop", func(w http.ResponseWriter, r *http.Request) { err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM) if err != nil { - glog.Errorf("unexpected error: %v", err) + glog.Errorf("Unexpected error: %v", err) } }) diff --git a/cmd/nginx/main_test.go b/cmd/nginx/main_test.go index 752e65e68..9128bdb55 100644 --- a/cmd/nginx/main_test.go +++ b/cmd/nginx/main_test.go @@ -33,15 +33,15 @@ func TestCreateApiserverClient(t *testing.T) { cli, err := createApiserverClient("", kubeConfigFile) if err != nil { - t.Fatalf("unexpected error creating api server client: %v", err) + t.Fatalf("Unexpected error creating Kubernetes REST client: %v", err) } if cli == nil { - t.Fatalf("expected a kubernetes client but none returned") + t.Fatal("Expected a REST client but none returned.") } _, err = createApiserverClient("", "") if err == nil { - t.Fatalf("expected an error creating api server client without an api server URL or kubeconfig file") + t.Fatal("Expected an error creating REST client without an API server URL or kubeconfig file.") } } @@ -51,7 +51,7 @@ func TestHandleSigterm(t *testing.T) { cli, err := createApiserverClient("", kubeConfigFile) if err != nil { - t.Fatalf("unexpected error creating api server client: %v", err) + t.Fatalf("Unexpected error creating Kubernetes REST client: %v", err) } resetForTesting(func() { t.Fatal("bad parse") }) @@ -67,20 +67,20 @@ func TestHandleSigterm(t *testing.T) { _, conf, err := parseFlags() if err != nil { - t.Errorf("unexpected error creating NGINX controller: %v", err) + t.Errorf("Unexpected error creating NGINX controller: %v", err) } conf.Client = cli fs, err := file.NewFakeFS() if err != nil { - t.Fatalf("unexpected error: %v", err) + t.Fatalf("Unexpected error: %v", err) } ngx := controller.NewNGINXController(conf, fs) go handleSigterm(ngx, func(code int) { if code != 1 { - t.Errorf("expected exit code 1 but %v received", code) + t.Errorf("Expected exit code 1 but %d received", code) } return @@ -88,12 +88,13 @@ func TestHandleSigterm(t *testing.T) { time.Sleep(1 * time.Second) - t.Logf("sending SIGTERM to process PID %v", syscall.Getpid()) + t.Logf("Sending SIGTERM to PID %d", syscall.Getpid()) err = syscall.Kill(syscall.Getpid(), syscall.SIGTERM) if err != nil { - t.Errorf("unexpected error sending SIGTERM signal") + t.Error("Unexpected error sending SIGTERM signal.") } } func TestRegisterHandlers(t *testing.T) { + // TODO } diff --git a/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml b/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml index ed367b563..82549f40e 100644 --- a/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml +++ b/docs/examples/auth/oauth-external-auth/oauth2-proxy.yaml @@ 
-28,7 +28,7 @@ spec: value: - name: OAUTH2_PROXY_CLIENT_SECRET value: - # python -c 'import os,base64; print base64.b64encode(os.urandom(16))' + # docker run -ti --rm python:3-alpine python -c 'import secrets,base64; print(base64.b64encode(base64.b64encode(secrets.token_bytes(16))));' - name: OAUTH2_PROXY_COOKIE_SECRET value: SECRET image: docker.io/colemickens/oauth2_proxy:latest diff --git a/docs/examples/customization/custom-errors/README.md b/docs/examples/customization/custom-errors/README.md index 45c2db5fd..d2738e499 100644 --- a/docs/examples/customization/custom-errors/README.md +++ b/docs/examples/customization/custom-errors/README.md @@ -1,82 +1,83 @@ # Custom Errors -This example shows how is possible to use a custom backend to render custom error pages. The code of this example is located here [custom-error-pages](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-errors) +This example demonstrates how to use a custom backend to render custom error pages. +## Customized default backend -The idea is to use the headers `X-Code` and `X-Format` that NGINX pass to the backend in case of an error to find out the best existent representation of the response to be returned. i.e. if the request contains an `Accept` header of type `json` the error should be in that format and not in `html` (the default in NGINX). - -First create the custom backend to use in the Ingress controller ``` $ kubectl create -f custom-default-backend.yaml service "nginx-errors" created -replicationcontroller "nginx-errors" created +deployment.apps "nginx-errors" created ``` -``` -$ kubectl get svc -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -echoheaders 10.3.0.7 nodes 80/TCP 23d -kubernetes 10.3.0.1 443/TCP 34d -nginx-errors 10.3.0.102 80/TCP 11s -``` +This should have created a Deployment and a Service with the name `nginx-errors`. ``` -$ kubectl get rc -CONTROLLER REPLICAS AGE -echoheaders 1 19d -nginx-errors 1 19s +$ kubectl get deploy,svc +NAME DESIRED CURRENT READY AGE +deployment.apps/nginx-errors 1 1 1 10s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/nginx-errors ClusterIP 10.0.0.12 <none> 80/TCP 10s ``` -Next create the Ingress controller executing -``` -$ kubectl create -f rc-custom-errors.yaml -``` +## Ingress controller configuration -Now to check if this is working we use curl: +If you do not already have an instance of the NGINX Ingress controller running, deploy it according to the +[deployment guide][deploy], then follow these steps: + +1. Edit the `nginx-ingress-controller` Deployment and set the value of the `--default-backend-service` flag to the name of the + newly created error backend. + +2. Edit the `nginx-configuration` ConfigMap and create the key `custom-http-errors` with a value of `404,503`. + +3. Take note of the IP address assigned to the NGINX Ingress controller Service. + ``` + $ kubectl get svc ingress-nginx + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + ingress-nginx ClusterIP 10.0.0.13 <none> 80/TCP,443/TCP 10m + ``` + +!!! Note + The `ingress-nginx` Service is of type `ClusterIP` in this example. This may vary depending on your environment. + Make sure you can use the Service to reach NGINX before proceeding with the rest of this example. + +[deploy]: ../../../deploy/ + +## Testing error pages + +Let us send a couple of HTTP requests using cURL and validate everything is working as expected.
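For readers who prefer Go over cURL, an equivalent check could look like the sketch below; the address 10.0.0.13 is the example Service IP noted in the previous step, and the cURL transcripts follow.

```go
// Optional Go equivalent of the cURL checks shown below. Assumes the
// controller Service is reachable at the example address 10.0.0.13.
package main

import (
	"fmt"
	"net/http"
)

func check(accept string) {
	req, err := http.NewRequest("GET", "http://10.0.0.13/", nil)
	if err != nil {
		panic(err)
	}
	if accept != "" {
		req.Header.Set("Accept", accept)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Expect 404 from the custom error backend, with a Content-Type
	// matching the Accept header that was sent.
	fmt.Println(resp.StatusCode, resp.Header.Get("Content-Type"))
}

func main() {
	check("")                 // HTML / default representation
	check("application/json") // JSON representation
}
```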
+ +A request to the default backend returns a 404 error with a custom message: ``` -$ curl -v http://172.17.4.99/ -* Trying 172.17.4.99... -* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) -> GET / HTTP/1.1 -> Host: 172.17.4.99 -> User-Agent: curl/7.43.0 -> Accept: */* -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.10.0 -< Date: Wed, 04 May 2016 02:53:45 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Vary: Accept-Encoding -< +$ curl -D- http://10.0.0.13/ +HTTP/1.1 404 Not Found +Server: nginx/1.13.12 +Date: Tue, 12 Jun 2018 19:11:24 GMT +Content-Type: */* +Transfer-Encoding: chunked +Connection: keep-alive + The page you're looking for could not be found. - -* Connection #0 to host 172.17.4.99 left intact ``` -Specifying json as expected format: +A request with a custom `Accept` header returns the corresponding document type (JSON): ``` -$ curl -v http://172.17.4.99/ -H 'Accept: application/json' -* Trying 172.17.4.99... -* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) -> GET / HTTP/1.1 -> Host: 172.17.4.99 -> User-Agent: curl/7.43.0 -> Accept: application/json -> -< HTTP/1.1 404 Not Found -< Server: nginx/1.10.0 -< Date: Wed, 04 May 2016 02:54:00 GMT -< Content-Type: text/html -< Transfer-Encoding: chunked -< Connection: keep-alive -< Vary: Accept-Encoding -< +$ curl -D- -H 'Accept: application/json' http://10.0.0.13/ +HTTP/1.1 404 Not Found +Server: nginx/1.13.12 +Date: Tue, 12 Jun 2018 19:12:36 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Vary: Accept-Encoding + { "message": "The page you're looking for could not be found" } - -* Connection #0 to host 172.17.4.99 left intact ``` + +To go further with this example, feel free to deploy your own applications and Ingress objects, and validate that the +responses are still in the correct format when a backend returns 503 (e.g. if you scale a Deployment down to 0 replicas). diff --git a/docs/examples/customization/custom-errors/custom-default-backend.yaml b/docs/examples/customization/custom-errors/custom-default-backend.yaml index fce7c0bcb..cc97c8c1f 100644 --- a/docs/examples/customization/custom-errors/custom-default-backend.yaml +++ b/docs/examples/customization/custom-errors/custom-default-backend.yaml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: Service metadata: @@ -5,27 +6,34 @@ metadata: labels: app: nginx-errors spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http selector: app: nginx-errors + ports: + - port: 80 + targetPort: 8080 + name: http --- -apiVersion: v1 -kind: ReplicationController +apiVersion: apps/v1beta2 +kind: Deployment metadata: name: nginx-errors spec: replicas: 1 + selector: + matchLabels: + app: nginx-errors template: metadata: labels: app: nginx-errors spec: containers: - - name: nginx-errors - image: aledbf/nginx-error-server:0.1 + - name: nginx-error-server + image: quay.io/kubernetes-ingress-controller/custom-error-pages-amd64:0.3 ports: - - containerPort: 80 \ No newline at end of file + - containerPort: 8080 + # By setting the environment variable DEBUG we can see the headers sent + # by the ingress controller to the backend in the client response.
+ # env: + # - name: DEBUG + # value: "true" diff --git a/docs/examples/customization/custom-errors/rc-custom-errors.yaml b/docs/examples/customization/custom-errors/rc-custom-errors.yaml deleted file mode 100644 index c0befcc49..000000000 --- a/docs/examples/customization/custom-errors/rc-custom-errors.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: nginx-ingress-controller - labels: - k8s-app: nginx-ingress-lb -spec: - replicas: 1 - selector: - k8s-app: nginx-ingress-lb - template: - metadata: - labels: - k8s-app: nginx-ingress-lb - name: nginx-ingress-lb - spec: - terminationGracePeriodSeconds: 60 - containers: - - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0 - name: nginx-ingress-lb - imagePullPolicy: Always - readinessProbe: - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - livenessProbe: - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 1 - # use downward API - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - containerPort: 80 - hostPort: 80 - - containerPort: 443 - hostPort: 443 - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/nginx-errors - securityContext: - runAsNonRoot: false diff --git a/docs/examples/customization/custom-upstream-check/README.md b/docs/examples/customization/custom-upstream-check/README.md index 434c47c57..0b19338d7 100644 --- a/docs/examples/customization/custom-upstream-check/README.md +++ b/docs/examples/customization/custom-upstream-check/README.md @@ -42,6 +42,3 @@ $ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf } .... ``` - - -![nginx-module-vts](custom-upstream.png "screenshot with custom configuration") diff --git a/docs/examples/customization/custom-upstream-check/custom-upstream.png b/docs/examples/customization/custom-upstream-check/custom-upstream.png deleted file mode 100644 index 30417894b..000000000 Binary files a/docs/examples/customization/custom-upstream-check/custom-upstream.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/README.md b/docs/examples/customization/custom-vts-metrics-prometheus/README.md deleted file mode 100644 index c85213905..000000000 --- a/docs/examples/customization/custom-vts-metrics-prometheus/README.md +++ /dev/null @@ -1,103 +0,0 @@ -# Custom VTS metrics with Prometheus - -This example aims to demonstrate the deployment of an nginx ingress controller and use a ConfigMap to enable [nginx vts module](https://github.com/vozlt/nginx-module-vts -) to export metrics in prometheus format. - -## vts-metrics - -Vts-metrics export NGINX metrics. To deploy all the files simply run `kubectl apply -f nginx`. A deployment and service will be -created which already has a `prometheus.io/scrape: 'true'` annotation and if you added -the recommended Prometheus service-endpoint scraping [configuration](https://raw.githubusercontent.com/prometheus/prometheus/master/documentation/examples/prometheus-kubernetes.yml), -Prometheus will scrape it automatically and you start using the generated metrics right away. 
- -## Custom configuration - -```console -apiVersion: v1 -data: - enable-vts-status: "true" -kind: ConfigMap -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx -``` - -```console -$ kubectl apply -f nginx-vts-metrics-conf.yaml -``` - -## Result - -Check whether the ingress controller successfully generated the NGINX vts status: - -```console -$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf|grep vhost_traffic_status_display - vhost_traffic_status_display; - vhost_traffic_status_display_format html; -``` - -### NGINX vts dashboard - -The vts dashboard provides real time metrics. - -![vts dashboard](imgs/vts-dashboard.png) - -Because the vts port it's not yet exposed, you should forward the controller port to see it. - -```console -$ kubectl port-forward $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n ingress-nginx --output=jsonpath={.items..metadata.name}) -n ingress-nginx 18080 -``` - -Now open the url [http://localhost:18080/nginx_status](http://localhost:18080/nginx_status) in your browser. - -### Prometheus metrics output - -NGINX Ingress controller already has a parser to convert vts metrics to Prometheus format. It exports prometheus metrics to the address `:10254/metrics`. - -```console -$ kubectl exec -ti -n ingress-nginx $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) curl localhost:10254/metrics -ingress_controller_ssl_expire_time_seconds{host="foo.bar.com"} -6.21355968e+10 -# HELP ingress_controller_success Cumulative number of Ingress controller reload operations -# TYPE ingress_controller_success counter -ingress_controller_success{count="reloads"} 3 -# HELP nginx_bytes_total Nginx bytes count -# TYPE nginx_bytes_total counter -nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="*"} 3708 -nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="_"} 3708 -nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="*"} 5256 -nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="_"} 5256 -``` - -### Customize metrics - -The default [vts vhost key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key) is `$geoip_country_code country::*` that expose metrics grouped by server and country code. The example below show how to have metrics grouped by server and server path. - -![vts dashboard](imgs/vts-dashboard-filter-key-path.png) - -## NGINX custom configuration ( http level ) - -``` - apiVersion: v1 - kind: ConfigMap - data: - enable-vts-status: "true" - vts-default-filter-key: "$server_name" -... 
-``` - -## Customize ingress - -``` - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - annotations: - nginx.ingress.kubernetes.io/vts-filter-key: $uri $server_name - name: ingress -``` - -## Result - -![prometheus filter key path](imgs/prometheus-filter-key-path.png) diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/prometheus-filter-key-path.png b/docs/examples/customization/custom-vts-metrics-prometheus/imgs/prometheus-filter-key-path.png deleted file mode 100644 index a266d4048..000000000 Binary files a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/prometheus-filter-key-path.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard-filter-key-path.png b/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard-filter-key-path.png deleted file mode 100644 index b9b3238f6..000000000 Binary files a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard-filter-key-path.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard.png b/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard.png deleted file mode 100644 index 0370f5ce9..000000000 Binary files a/docs/examples/customization/custom-vts-metrics-prometheus/imgs/vts-dashboard.png and /dev/null differ diff --git a/docs/examples/customization/custom-vts-metrics-prometheus/nginx-vts-metrics-conf.yaml b/docs/examples/customization/custom-vts-metrics-prometheus/nginx-vts-metrics-conf.yaml deleted file mode 100644 index 6a6e795cd..000000000 --- a/docs/examples/customization/custom-vts-metrics-prometheus/nginx-vts-metrics-conf.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -data: - enable-vts-status: "true" -kind: ConfigMap -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx diff --git a/docs/examples/index.md b/docs/examples/index.md index feec494cd..df1d01d5e 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -13,10 +13,9 @@ Auth | [OAuth external auth](auth/oauth-external-auth/README.md) | TODO | TODO Customization | [Configuration snippets](customization/configuration-snippets/README.md) | customize nginx location configuration using annotations | Advanced Customization | [Custom configuration](customization/custom-configuration/README.md) | TODO | TODO Customization | [Custom DH parameters for perfect forward secrecy](customization/ssl-dh-param/README.md) | TODO | TODO -Customization | [Custom errors](customization/custom-errors/README.md) | TODO | TODO +Customization | [Custom errors](customization/custom-errors/README.md) | serve custom error pages from the default backend | Intermediate Customization | [Custom headers](customization/custom-headers/README.md) | set custom headers before sending traffic to backends | Advanced Customization | [Custom upstream check](customization/custom-upstream-check/README.md) | TODO | TODO -Customization | [Custom VTS metrics with Prometheus](customization/custom-vts-metrics-prometheus/README.md) | TODO | TODO Customization | [External authentication with response header propagation](customization/external-auth-headers/README.md) | TODO | TODO Customization | [Sysctl tuning](customization/sysctl/README.md) | TODO | TODO Features | [Rewrite](rewrite/README.md) | TODO | TODO diff --git a/docs/user-guide/cli-arguments.md b/docs/user-guide/cli-arguments.md index 91707fd4b..fac879b32 100644 --- 
a/docs/user-guide/cli-arguments.md +++ b/docs/user-guide/cli-arguments.md @@ -1,48 +1,48 @@ # Command line arguments -The following command line arguments are accepted by the main controller executable. +The following command line arguments are accepted by the Ingress controller executable. -They are set in the container spec of the `nginx-ingress-controller` Deployment object (see `deploy/with-rbac.yaml` or `deploy/without-rbac.yaml`). +They are set in the container spec of the `nginx-ingress-controller` Deployment manifest (see `deploy/with-rbac.yaml` or `deploy/without-rbac.yaml`). | Argument | Description | |----------|-------------| -| `--alsologtostderr` | log to standard error as well as files | -| `--annotations-prefix string` | Prefix of the ingress annotations. (default "nginx.ingress.kubernetes.io") | -| `--apiserver-host string` | The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted. | -| `--configmap string` | Name of the ConfigMap that contains the custom configuration to use | -| `--default-backend-service string` | Service used to serve a 404 page for the default backend. Takes the form namespace/name. The controller uses the first node port of this Service for the default backend. | -| `--default-server-port int` | Default port to use for exposing the default server (catch all) (default 8181) | -| `--default-ssl-certificate string` | Name of the secret that contains a SSL certificate to be used as default for a HTTPS catch-all server. Takes the form /. | -| `--election-id string` | Election id to use for status update. (default "ingress-controller-leader") | -| `--enable-dynamic-configuration` | When enabled controller will try to avoid Nginx reloads as much as possible by using Lua. Disabled by default. | -| `--enable-ssl-chain-completion` | Defines if the nginx ingress controller should check the secrets for missing intermediate CA certificates. If the certificate contain issues chain issues is not possible to enable OCSP. Default is true. (default true) | -| `--enable-ssl-passthrough` | Enable SSL passthrough feature. Default is disabled | -| `--force-namespace-isolation` | Force namespace isolation. This flag is required to avoid the reference of secrets or configmaps located in a different namespace than the specified in the flag --watch-namespace. | -| `--health-check-path string` | Defines the URL to be used as health check inside in the default server in NGINX. (default "/healthz") | -| `--healthz-port int` | port for healthz endpoint. (default 10254) | -| `--http-port int` | Indicates the port to use for HTTP traffic (default 80) | -| `--https-port int` | Indicates the port to use for HTTPS traffic (default 443) | -| `--ingress-class string` | Name of the ingress class to route through this controller. | -| `--kubeconfig string` | Path to kubeconfig file with authorization and master location information. | -| `--log_backtrace_at traceLocation` | when logging hits line file:N, emit a stack trace (default :0) | -| `--log_dir string` | If non-empty, write log files in this directory | -| `--logtostderr` | log to standard error instead of files (default true) | -| `--profiling` | Enable profiling via web interface host:port/debug/pprof/ (default true) | -| `--publish-service string` | Service fronting the ingress controllers. Takes the form namespace/name. 
The controller will set the endpoint records on the ingress objects to reflect those on the service. | -| `--publish-status-address string` | User customized address to be set in the status of ingress resources. The controller will set the endpoint records on the ingress using this address. | -| `--report-node-internal-ip-address` | Defines if the nodes IP address to be returned in the ingress status should be the internal instead of the external IP address | -| `--sort-backends` | Defines if backends and its endpoints should be sorted | -| `--ssl-passtrough-proxy-port int` | Default port to use internally for SSL when SSL Passthgough is enabled (default 442) | -| `--status-port int` | Indicates the TCP port to use for exposing the nginx status page (default 18080) | -| `--stderrthreshold severity` | logs at or above this threshold go to stderr (default 2) | -| `--sync-period duration` | Relist and confirm cloud resources this often. Default is 10 minutes (default 10m0s) | -| `--sync-rate-limit float32` | Define the sync frequency upper limit (default 0.3) | -| `--tcp-services-configmap string` | Name of the ConfigMap that contains the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is the name of the service with the format namespace/serviceName and the port of the service could be a number of the name of the port. The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend | -| `--udp-services-configmap string` | Name of the ConfigMap that contains the definition of the UDP services to expose. The key in the map indicates the external port to be used. The value is the name of the service with the format namespace/serviceName and the port of the service could be a number of the name of the port. | -| `--update-status` | Indicates if the ingress controller should update the Ingress status IP/hostname. Default is true (default true) | -| `--update-status-on-shutdown` | Indicates if the ingress controller should update the Ingress status IP/hostname when the controller is being stopped. Default is true (default true) | -| `-v`, `--v Level` | log level for V logs | -| `--version` | Shows release information about the NGINX Ingress controller | -| `--vmodule moduleSpec` | comma-separated list of pattern=N settings for file-filtered logging | -| `--watch-namespace string` | Namespace to watch for Ingress. Default is to watch all namespaces | +| --alsologtostderr | log to standard error as well as files | +| --annotations-prefix string | Prefix of the Ingress annotations specific to the NGINX controller. (default "nginx.ingress.kubernetes.io") | +| --apiserver-host string | Address of the Kubernetes API server. Takes the form "protocol://address:port". If not specified, it is assumed the program runs inside a Kubernetes cluster and local discovery is attempted. | +| --configmap string | Name of the ConfigMap containing custom global configurations for the controller. | +| --default-backend-service string | Service used to serve HTTP requests not matching any known server name (catch-all). Takes the form "namespace/name". The controller configures NGINX to forward requests to the first port of this Service. | +| --default-server-port int | Port to use for exposing the default server (catch-all). (default 8181) | +| --default-ssl-certificate string | Secret containing a SSL certificate to be used by the default HTTPS server (catch-all). Takes the form "namespace/name". 
| +| --election-id string | Election id to use for Ingress status updates. (default "ingress-controller-leader") | +| --enable-dynamic-configuration | Dynamically refresh backends on topology changes instead of reloading NGINX. Feature backed by OpenResty Lua libraries. | +| --enable-ssl-chain-completion | Autocomplete SSL certificate chains with missing intermediate CA certificates. A valid certificate chain is required to enable OCSP stapling. Certificates uploaded to Kubernetes must have the "Authority Information Access" X.509 v3 extension for this to succeed. (default true) | +| --enable-ssl-passthrough | Enable SSL Passthrough. | +| --force-namespace-isolation | Force namespace isolation. Prevents Ingress objects from referencing Secrets and ConfigMaps located in a different namespace than their own. May be used together with watch-namespace. | +| --health-check-path string | URL path of the health check endpoint. Configured inside the NGINX status server. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. (default "/healthz") | +| --healthz-port int | Port to use for the healthz endpoint. (default 10254) | +| --http-port int | Port to use for servicing HTTP traffic. (default 80) | +| --https-port int | Port to use for servicing HTTPS traffic. (default 443) | +| --ingress-class string | Name of the ingress class this controller satisfies. The class of an Ingress object is set using the annotation "kubernetes.io/ingress.class". All ingress classes are satisfied if this parameter is left empty. | +| --kubeconfig string | Path to a kubeconfig file containing authorization and API server information. | +| --log_backtrace_at traceLocation | when logging hits line file:N, emit a stack trace (default :0) | +| --log_dir string | If non-empty, write log files in this directory | +| --logtostderr | log to standard error instead of files (default true) | +| --profiling | Enable profiling via web interface host:port/debug/pprof/ (default true) | +| --publish-service string | Service fronting the Ingress controller. Takes the form "namespace/name". When used together with update-status, the controller mirrors the address of this service's endpoints to the load-balancer status of all Ingress objects it satisfies. | +| --publish-status-address string | Customized address to set as the load-balancer status of Ingress objects this controller satisfies. Requires the update-status parameter. | +| --report-node-internal-ip-address | Set the load-balancer status of Ingress objects to internal Node addresses instead of external. Requires the update-status parameter. | +| --sort-backends | Sort servers inside NGINX upstreams. | +| --ssl-passtrough-proxy-port int | Port to use internally for SSL Passthgough. (default 442) | +| --status-port int | Port to use for exposing NGINX status pages. (default 18080) | +| --stderrthreshold severity | logs at or above this threshold go to stderr (default 2) | +| --sync-period duration | Period at which the controller forces the repopulation of its local object stores. (default 10m0s) | +| --sync-rate-limit float32 | Define the sync frequency upper limit (default 0.3) | +| --tcp-services-configmap string | Name of the ConfigMap containing the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form "namespace/name:port", where "port" can either be a port number or name. 
TCP ports 80 and 443 are reserved by the controller for servicing HTTP traffic. | +| --udp-services-configmap string | Name of the ConfigMap containing the definition of the UDP services to expose. The key in the map indicates the external port to be used. The value is a reference to a Service in the form "namespace/name:port", where "port" can either be a port name or number. | +| --update-status | Update the load-balancer status of Ingress objects this controller satisfies. Requires setting the publish-service parameter to a valid Service reference. (default true) | +| --update-status-on-shutdown | Update the load-balancer status of Ingress objects when the controller shuts down. Requires the update-status parameter. (default true) | +| --v Level | log level for V logs | +| --version | Show release information about the NGINX Ingress controller and exit. | +| --vmodule moduleSpec | comma-separated list of pattern=N settings for file-filtered logging | +| --watch-namespace string | Namespace the controller watches for updates to Kubernetes objects. This includes Ingresses, Services and all configuration resources. All namespaces are watched if this parameter is left empty. | diff --git a/docs/user-guide/custom-errors.md b/docs/user-guide/custom-errors.md index c9be1c678..03e550bd1 100644 --- a/docs/user-guide/custom-errors.md +++ b/docs/user-guide/custom-errors.md @@ -1,19 +1,30 @@ # Custom errors -In case of an error in a request the body of the response is obtained from the `default backend`. -Each request to the default backend includes two headers: +When the [`custom-http-errors`][cm-custom-http-errors] option is enabled, the Ingress controller configures NGINX so +that it passes several HTTP headers down to its `default-backend` in case of error: -- `X-Code` indicates the HTTP code to be returned to the client. -- `X-Format` the value of the `Accept` header. +| Header | Value | +| ---------------- | ------------------------------------------------ | +| `X-Code` | HTTP status code returned by the request | +| `X-Format` | Value of the `Accept` header sent by the client | +| `X-Original-URI` | URI that caused the error | +| `X-Namespace` | Namespace where the backend Service is located | +| `X-Ingress-Name` | Name of the Ingress where the backend is defined | +| `X-Service-Name` | Name of the Service backing the backend | +| `X-Service-Port` | Port number of the Service backing the backend | + +A custom error backend can use this information to return the best possible representation of an error page. For +example, if the value of the `Accept` header sent by the client was `application/json`, a carefully crafted backend +could decide to return the error payload as a JSON document instead of HTML. !!! Important - The custom backend must return the correct HTTP status code to be returned. NGINX does not change the response from the custom default backend. + The custom backend is expected to return the correct HTTP status code instead of `200`. NGINX does not change + the response from the custom default backend. -Using these two headers it's possible to use a custom backend service like [this one](https://github.com/kubernetes/ingress-nginx/tree/master/images/custom-error-pages) that inspects each request and returns a custom error page with the format expected by the client. Please check the example [custom-errors](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-errors).
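To make the header contract above concrete, here is a minimal sketch of a backend that honors `X-Code` and `X-Format`; it is only an illustration, not the implementation referenced below:

```go
// Minimal sketch of a custom error backend honoring the documented headers.
// It is not the images/custom-error-pages implementation.
package main

import (
	"fmt"
	"log"
	"net/http"
	"strconv"
	"strings"
)

func errorHandler(w http.ResponseWriter, r *http.Request) {
	code, err := strconv.Atoi(r.Header.Get("X-Code"))
	if err != nil {
		code = http.StatusNotFound
	}

	// Pick a representation based on the Accept value NGINX forwarded.
	if strings.Contains(r.Header.Get("X-Format"), "application/json") {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(code) // return the real status code, never 200
		fmt.Fprintf(w, `{"message": "error %d for %s"}`, code, r.Header.Get("X-Original-URI"))
		return
	}

	w.Header().Set("Content-Type", "text/html")
	w.WriteHeader(code)
	fmt.Fprintf(w, "<h1>Error %d</h1>", code)
}

func main() {
	http.HandleFunc("/", errorHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```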
+An example of such a custom backend is available inside the source repository at [images/custom-error-pages][img-custom-error-pages]. -NGINX sends additional headers that can be used to build custom response: +See also the [Custom errors][example-custom-errors] example. -- X-Original-URI -- X-Namespace -- X-Ingress-Name -- X-Service-Name +[cm-custom-http-errors]: ./nginx-configuration/configmap.md#custom-http-errors +[img-custom-error-pages]: https://github.com/kubernetes/ingress-nginx/tree/master/images/custom-error-pages +[example-custom-errors]: ../examples/customization/custom-errors diff --git a/docs/user-guide/nginx-configuration/annotations.md b/docs/user-guide/nginx-configuration/annotations.md index 9b9d0f8d5..56551b4e6 100644 --- a/docs/user-guide/nginx-configuration/annotations.md +++ b/docs/user-guide/nginx-configuration/annotations.md @@ -44,6 +44,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/limit-rps](#rate-limiting)|number| |[nginx.ingress.kubernetes.io/permanent-redirect](#permanent-redirect)|string| |[nginx.ingress.kubernetes.io/proxy-body-size](#custom-max-body-size)|string| +|[nginx.ingress.kubernetes.io/proxy-cookie-domain](#proxy-cookie-domain)|string| |[nginx.ingress.kubernetes.io/proxy-connect-timeout](#custom-timeouts)|number| |[nginx.ingress.kubernetes.io/proxy-send-timeout](#custom-timeouts)|number| |[nginx.ingress.kubernetes.io/proxy-read-timeout](#custom-timeouts)|number| @@ -70,6 +71,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/upstream-vhost](#custom-nginx-upstream-vhost)|string| |[nginx.ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR| |[nginx.ingress.kubernetes.io/proxy-buffering](#proxy-buffering)|string| +|[nginx.ingress.kubernetes.io/proxy-buffer-size](#proxy-buffer-size)|string| |[nginx.ingress.kubernetes.io/ssl-ciphers](#ssl-ciphers)|string| |[nginx.ingress.kubernetes.io/connection-proxy-header](#connection-proxy-header)|string| |[nginx.ingress.kubernetes.io/enable-access-log](#enable-access-log)|"true" or "false"| @@ -150,7 +152,7 @@ nginx.ingress.kubernetes.io/auth-realm: "realm string" NGINX exposes some flags in the [upstream configuration](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that enable the configuration of each server in the upstream. The Ingress controller allows custom `max_fails` and `fail_timeout` parameters in a global context using `upstream-max-fails` and `upstream-fail-timeout` in the NGINX ConfigMap or in a particular Ingress rule. `upstream-max-fails` defaults to 0. This means NGINX will respect the container's `readinessProbe` if it is defined. If there is no probe and no values for `upstream-max-fails` NGINX will continue to send traffic to the container. -!!! tip +!!! tip With the default configuration NGINX will not health check your backends. Whenever the endpoints controller notices a readiness probe failure, that pod's IP will be removed from the list of endpoints. This will trigger the NGINX controller to also remove it from the upstreams.** To use custom values in an Ingress rule define these annotations: @@ -208,9 +210,9 @@ The annotations are: !!! attention TLS with Client Authentication is **not** possible in Cloudflare and might result in unexpected behavior.
- + Cloudflare only allows Authenticated Origin Pulls and is required to use their own certificate: [https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/](https://blog.cloudflare.com/protecting-the-origin-with-tls-authenticated-origin-pulls/) - + Only Authenticated Origin Pulls are allowed and can be configured by following their tutorial: [https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls](https://support.cloudflare.com/hc/en-us/articles/204494148-Setting-up-NGINX-to-use-TLS-Authenticated-Origin-Pulls) @@ -464,6 +466,12 @@ To use custom values in an Ingress rule define these annotation: nginx.ingress.kubernetes.io/proxy-body-size: 8m ``` +### Proxy cookie domain + +Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the "Set-Cookie" header fields of a proxied server response. + +To configure this setting globally for all Ingress rules, the `proxy-cookie-domain` value may be set in the [NGINX ConfigMap][configmap]. + ### Proxy buffering Enable or disable proxy buffering [`proxy_buffering`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering). @@ -476,6 +484,16 @@ To use custom values in an Ingress rule define these annotation: nginx.ingress.kubernetes.io/proxy-buffering: "on" ``` +### Proxy buffer size + +Sets the size of the buffer [`proxy_buffer_size`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) used for reading the first part of the response received from the proxied server. +By default the proxy buffer size is set to "4k". + +To configure this setting globally, set `proxy-buffer-size` in [NGINX ConfigMap][configmap]. To use custom values in an Ingress rule, define this annotation: ```yaml nginx.ingress.kubernetes.io/proxy-buffer-size: "8k" ``` + ### SSL ciphers Specifies the [enabled ciphers](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers). @@ -579,4 +597,3 @@ To use the module in the Kubernetes Nginx ingress controller, you have two optio - Use an InfluxDB server configured to enable the [UDP protocol](https://docs.influxdata.com/influxdb/v1.5/supported_protocols/udp/).
- Deploy Telegraf as a sidecar proxy to the Ingress controller configured to listen UDP with the [socket listener input](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/inputs/socket_listener) and to write using anyone of the [outputs plugins](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/outputs) - diff --git a/docs/user-guide/nginx-configuration/configmap.md b/docs/user-guide/nginx-configuration/configmap.md index e532305e9..77539ed8c 100644 --- a/docs/user-guide/nginx-configuration/configmap.md +++ b/docs/user-guide/nginx-configuration/configmap.md @@ -44,10 +44,6 @@ The following table shows a configuration option's name, type, and the default v |[disable-ipv6-dns](#disable-ipv6-dns)|bool|false| |[enable-underscores-in-headers](#enable-underscores-in-headers)|bool|false| |[ignore-invalid-headers](#ignore-invalid-headers)|bool|true| -|[enable-vts-status](#enable-vts-status)|bool|false| -|[vts-status-zone-size](#vts-status-zone-size)|string|"10m"| -|[vts-sum-key](#vts-sum-key)|string|"*"| -|[vts-default-filter-key](#vts-default-filter-key)|string|"$geoip_country_code country::*"| |[retry-non-idempotent](#retry-non-idempotent)|bool|"false"| |[error-log-level](#error-log-level)|string|"notice"| |[http2-max-field-size](#http2-max-field-size)|string|"4k"| @@ -240,32 +236,6 @@ Enables underscores in header names. _**default:**_ is disabled Set if header fields with invalid names should be ignored. _**default:**_ is enabled -## enable-vts-status - -Allows the replacement of the default status page with a third party module named [nginx-module-vts](https://github.com/vozlt/nginx-module-vts). -_**default:**_ is disabled - -## vts-status-zone-size - -Vts config on http level sets parameters for a shared memory zone that will keep states for various keys. The cache is shared between all worker processes. _**default:**_ 10m - -_References:_ -[https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone) - -## vts-default-filter-key - -Vts config on http level enables the keys by user defined variable. The key is a key string to calculate traffic. The name is a group string to calculate traffic. The key and name can contain variables such as $host, $server_name. The name's group belongs to filterZones if specified. The key's group belongs to serverZones if not specified second argument name. _**default:**_ $geoip_country_code country::* - -_References:_ -[https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key) - -## vts-sum-key - -For metrics keyed (or when using Prometheus, labeled) by server zone, this value is used to indicate metrics for all server zones combined. _**default:**_ * - -_References:_ -[https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_display_sum_key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_display_sum_key) - ## retry-non-idempotent Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value "true". 
diff --git a/docs/user-guide/nginx-status-page.md b/docs/user-guide/nginx-status-page.md index 8152c5eae..1468b89a6 100644 --- a/docs/user-guide/nginx-status-page.md +++ b/docs/user-guide/nginx-status-page.md @@ -2,10 +2,3 @@ The [ngx_http_stub_status_module](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) module provides access to basic status information. This is the default module active in the url `/nginx_status` in the status port (default is 18080). - -This controller provides an alternative to this module using the [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) module. -To use this module just set in the configuration configmap `enable-vts-status: "true"`. - -![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter") - -To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json` diff --git a/images/custom-error-pages/Makefile b/images/custom-error-pages/Makefile index 47180650c..0fd5f7328 100644 --- a/images/custom-error-pages/Makefile +++ b/images/custom-error-pages/Makefile @@ -3,7 +3,7 @@ all: all-container BUILDTAGS= # Use the 0.0 tag for testing, it shouldn't clobber any release builds -TAG?=0.1 +TAG?=0.3 REGISTRY?=quay.io/kubernetes-ingress-controller GOOS?=linux DOCKER?=docker @@ -26,11 +26,11 @@ ARCH ?= $(shell go env GOARCH) GOARCH = ${ARCH} DUMB_ARCH = ${ARCH} -BASEIMAGE?=alpine:3.6 +BASEIMAGE?=alpine:3.7 ALL_ARCH = amd64 arm arm64 ppc64le -QEMUVERSION=v2.9.1 +QEMUVERSION=v2.12.0 IMGNAME = custom-error-pages IMAGE = $(REGISTRY)/$(IMGNAME) @@ -74,7 +74,7 @@ ifeq ($(ARCH),amd64) else # When cross-building, only the placeholder "CROSS_BUILD_" should be removed # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel - $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset + # $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs $(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE) endif @@ -103,3 +103,8 @@ build: clean release: all-container all-push echo "done" + +.PHONY: register-qemu +register-qemu: + # Register /usr/bin/qemu-ARCH-static as the handler for binaries in multiple platforms + $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset diff --git a/images/custom-error-pages/README.md b/images/custom-error-pages/README.md index 3ee67e5d9..88983591f 100644 --- a/images/custom-error-pages/README.md +++ b/images/custom-error-pages/README.md @@ -1,2 +1,3 @@ +# custom-error-pages Example of Custom error pages for the NGINX Ingress controller diff --git a/images/custom-error-pages/main.go b/images/custom-error-pages/main.go index 245bd2c59..e4f1ccb23 100644 --- a/images/custom-error-pages/main.go +++ b/images/custom-error-pages/main.go @@ -39,15 +39,34 @@ const ( // ContentType name of the header that defines the format of the reply ContentType = "Content-Type" + + // OriginalURI name of the header with the original URL from NGINX + OriginalURI = "X-Original-URI" + + // Namespace name of the header that contains information about the Ingress namespace + Namespace = "X-Namespace" + + // IngressName name of the header that contains the matched Ingress + IngressName = "X-Ingress-Name" + + // ServiceName name of the header that contains the matched Service in the Ingress 
+ ServiceName = "X-Service-Name" + + // ServicePort name of the header that contains the matched Service port in the Ingress + ServicePort = "X-Service-Port" + + // ErrFilesPathVar is the name of the environment variable indicating + // the location on disk of files served by the handler. + ErrFilesPathVar = "ERROR_FILES_PATH" ) func main() { - path := "/www" - if os.Getenv("PATH") != "" { - path = os.Getenv("PATH") + errFilesPath := "/www" + if os.Getenv(ErrFilesPathVar) != "" { + errFilesPath = os.Getenv(ErrFilesPathVar) } - http.HandleFunc("/", errorHandler(path)) + http.HandleFunc("/", errorHandler(errFilesPath)) http.Handle("/metrics", promhttp.Handler()) @@ -63,18 +82,28 @@ func errorHandler(path string) func(http.ResponseWriter, *http.Request) { start := time.Now() ext := "html" + if os.Getenv("DEBUG") != "" { + w.Header().Set(FormatHeader, r.Header.Get(FormatHeader)) + w.Header().Set(CodeHeader, r.Header.Get(CodeHeader)) + w.Header().Set(ContentType, r.Header.Get(ContentType)) + w.Header().Set(OriginalURI, r.Header.Get(OriginalURI)) + w.Header().Set(Namespace, r.Header.Get(Namespace)) + w.Header().Set(IngressName, r.Header.Get(IngressName)) + w.Header().Set(ServiceName, r.Header.Get(ServiceName)) + w.Header().Set(ServicePort, r.Header.Get(ServicePort)) + } + format := r.Header.Get(FormatHeader) if format == "" { format = "text/html" - log.Printf("forma not specified. Using %v\n", format) + log.Printf("format not specified. Using %v", format) } - mediaType, _, _ := mime.ParseMediaType(format) - cext, err := mime.ExtensionsByType(mediaType) + cext, err := mime.ExtensionsByType(format) if err != nil { - log.Printf("unexpected error reading media type extension: %v. Using %v\n", err, ext) + log.Printf("unexpected error reading media type extension: %v. Using %v", err, ext) } else if len(cext) == 0 { - log.Printf("couldn't get media type extension. Using %v\n", ext) + log.Printf("couldn't get media type extension. Using %v", ext) } else { ext = cext[0] } @@ -84,7 +113,7 @@ func errorHandler(path string) func(http.ResponseWriter, *http.Request) { code, err := strconv.Atoi(errCode) if err != nil { code = 404 - log.Printf("unexpected error reading return code: %v. Using %v\n", err, code) + log.Printf("unexpected error reading return code: %v. 
Using %v", err, code) } w.WriteHeader(code) @@ -94,22 +123,22 @@ func errorHandler(path string) func(http.ResponseWriter, *http.Request) { file := fmt.Sprintf("%v/%v%v", path, code, ext) f, err := os.Open(file) if err != nil { - log.Printf("unexpected error opening file: %v\n", err) + log.Printf("unexpected error opening file: %v", err) scode := strconv.Itoa(code) file := fmt.Sprintf("%v/%cxx%v", path, scode[0], ext) f, err := os.Open(file) if err != nil { - log.Printf("unexpected error opening file: %v\n", err) + log.Printf("unexpected error opening file: %v", err) http.NotFound(w, r) return } defer f.Close() - log.Printf("serving custom error response for code %v and format %v from file %v\n", code, format, file) + log.Printf("serving custom error response for code %v and format %v from file %v", code, format, file) io.Copy(w, f) return } defer f.Close() - log.Printf("serving custom error response for code %v and format %v from file %v\n", code, format, file) + log.Printf("serving custom error response for code %v and format %v from file %v", code, format, file) io.Copy(w, f) duration := time.Now().Sub(start).Seconds() diff --git a/images/custom-error-pages/rootfs/etc/mime.types b/images/custom-error-pages/rootfs/etc/mime.types new file mode 100644 index 000000000..4c5804d73 --- /dev/null +++ b/images/custom-error-pages/rootfs/etc/mime.types @@ -0,0 +1,1830 @@ +# extracted from mailcap-2.1.48.tar.xz @ https://releases.pagure.org/mailcap/ + +# This is a comment. I love comments. -*- indent-tabs-mode: t -*- + +# This file controls what Internet media types are sent to the client for +# given file extension(s). Sending the correct media type to the client +# is important so they know how to handle the content of the file. +# Extra types can either be added here or by using an AddType directive +# in your config files. For more information about Internet media types, +# please read RFC 2045, 2046, 2047, 2048, and 2077. The Internet media type +# registry is at . 
+ +# IANA types + +# MIME type Extensions +application/1d-interleaved-parityfec +application/3gpdash-qoe-report+xml +application/3gpp-ims+xml +application/A2L a2l +application/activemessage +application/alto-costmap+json +application/alto-costmapfilter+json +application/alto-directory+json +application/alto-endpointcost+json +application/alto-endpointcostparams+json +application/alto-endpointprop+json +application/alto-endpointpropparams+json +application/alto-error+json +application/alto-networkmap+json +application/alto-networkmapfilter+json +application/AML aml +application/andrew-inset ez +application/applefile +application/ATF atf +application/ATFX atfx +application/ATXML atxml +application/atom+xml atom +application/atomcat+xml atomcat +application/atomdeleted+xml atomdeleted +application/atomicmail +application/atomsvc+xml atomsvc +application/auth-policy+xml apxml +application/bacnet-xdd+zip xdd +application/batch-SMTP +application/beep+xml +application/calendar+json +application/calendar+xml xcs +application/call-completion +application/cals-1840 +application/cbor cbor +application/ccmp+xml ccmp +application/ccxml+xml ccxml +application/CDFX+XML cdfx +application/cdmi-capability cdmia +application/cdmi-container cdmic +application/cdmi-domain cdmid +application/cdmi-object cdmio +application/cdmi-queue cdmiq +application/cdni +application/CEA cea +application/cea-2018+xml +application/cellml+xml cellml cml +application/cfw +application/clue_info+xml clue +application/cms cmsc +application/cnrp+xml +application/coap-group+json +application/coap-payload +application/commonground +application/conference-info+xml +application/cpl+xml cpl +application/cose +application/cose-key +application/cose-key-set +application/csrattrs csrattrs +application/csta+xml +application/CSTAdata+xml +application/csvm+json +application/cybercash +application/dash+xml mpd +application/dashdelta mpdd +application/davmount+xml davmount +application/dca-rft +application/DCD dcd +application/dec-dx +application/dialog-info+xml +application/dicom dcm +application/dicom+json +application/dicom+xml +application/DII dii +application/DIT dit +application/dns +application/dskpp+xml xmls +application/dssc+der dssc +application/dssc+xml xdssc +application/dvcs dvc +application/ecmascript es +application/EDI-Consent +application/EDI-X12 +application/EDIFACT +application/efi efi +application/EmergencyCallData.Comment+xml +application/EmergencyCallData.Control+xml +application/EmergencyCallData.DeviceInfo+xml +application/EmergencyCallData.eCall.MSD +application/EmergencyCallData.ProviderInfo+xml +application/EmergencyCallData.ServiceInfo+xml +application/EmergencyCallData.SubscriberInfo+xml +application/EmergencyCallData.VEDS+xml +application/emma+xml emma +application/emotionml+xml emotionml +application/encaprtp +application/epp+xml +application/epub+zip epub +application/eshop +application/exi exi +application/fastinfoset finf +application/fastsoap +application/fdt+xml fdt +# fits, fit, fts: image/fits +application/fits +# application/font-sfnt deprecated in favor of font/sfnt +application/font-tdpfr pfr +# application/font-woff deprecated in favor of font/woff +application/framework-attributes+xml +application/geo+json geojson +application/geo+json-seq +application/gml+xml gml +application/gzip gz tgz +application/H224 +application/held+xml +application/http +application/hyperstudio stk +application/ibe-key-request+xml +application/ibe-pkg-reply+xml +application/ibe-pp-data +application/iges 
+application/im-iscomposing+xml +application/index +application/index.cmd +application/index.obj +application/index.response +application/index.vnd +application/inkml+xml ink inkml +application/iotp +application/ipfix ipfix +application/ipp +application/isup +application/its+xml its +application/javascript js +application/jose +application/jose+json +application/jrd+json jrd +application/json json +application/json-patch+json json-patch +application/json-seq +application/jwk+json +application/jwk-set+json +application/jwt +application/kpml-request+xml +application/kpml-response+xml +application/ld+json jsonld +application/lgr+xml lgr +application/link-format wlnk +application/load-control+xml +application/lost+xml lostxml +application/lostsync+xml lostsyncxml +application/LXF lxf +application/mac-binhex40 hqx +application/macwriteii +application/mads+xml mads +application/marc mrc +application/marcxml+xml mrcx +application/mathematica nb ma mb +application/mathml-content+xml +application/mathml-presentation+xml +application/mathml+xml mml +application/mbms-associated-procedure-description+xml +application/mbms-deregister+xml +application/mbms-envelope+xml +application/mbms-msk-response+xml +application/mbms-msk+xml +application/mbms-protection-description+xml +application/mbms-reception-report+xml +application/mbms-register-response+xml +application/mbms-register+xml +application/mbms-schedule+xml +application/mbms-user-service-description+xml +application/mbox mbox +application/media_control+xml +# mpf: text/vnd.ms-mediapackage +application/media-policy-dataset+xml +application/mediaservercontrol+xml +application/merge-patch+json +application/metalink4+xml meta4 +application/mets+xml mets +application/MF4 mf4 +application/mikey +application/mods+xml mods +application/moss-keys +application/moss-signature +application/mosskey-data +application/mosskey-request +application/mp21 m21 mp21 +# mp4, mpg4: video/mp4, see RFC 4337 +application/mp4 +application/mpeg4-generic +application/mpeg4-iod +application/mpeg4-iod-xmt +# xdf: application/xcap-diff+xml +application/mrb-consumer+xml +application/mrb-publish+xml +application/msc-ivr+xml +application/msc-mixer+xml +application/msword doc +application/mud+json +application/mxf mxf +application/n-quads nq +application/n-triples nt +application/nasdata +application/news-checkgroups +application/news-groupinfo +application/news-transmission +application/nlsml+xml +application/nss +application/ocsp-request orq +application/ocsp-response ors +application/octet-stream bin lha lzh exe class so dll img iso +application/oda oda +application/ODX odx +application/oebps-package+xml opf +application/ogg ogx +application/oxps oxps +application/p2p-overlay+xml relo +application/parityfec +# xer: application/xcap-error+xml +application/patch-ops-error+xml +application/pdf pdf +application/PDX pdx +application/pgp-encrypted pgp +application/pgp-keys +application/pgp-signature sig +application/pidf-diff+xml +application/pidf+xml +application/pkcs10 p10 +application/pkcs12 p12 pfx +application/pkcs7-mime p7m p7c +application/pkcs7-signature p7s +application/pkcs8 p8 +# ac: application/vnd.nokia.n-gage.ac+xml +application/pkix-attr-cert +application/pkix-cert cer +application/pkix-crl crl +application/pkix-pkipath pkipath +application/pkixcmp pki +application/pls+xml pls +application/poc-settings+xml +application/postscript ps eps ai +application/ppsp-tracker+json +application/problem+json +application/problem+xml +application/provenance+xml provx 
+application/prs.alvestrand.titrax-sheet +application/prs.cww cw cww +application/prs.hpub+zip hpub +application/prs.nprend rnd rct +application/prs.plucker +application/prs.rdf-xml-crypt rdf-crypt +application/prs.xsf+xml xsf +application/pskc+xml pskcxml +application/qsig +application/raptorfec +application/rdap+json +application/rdf+xml rdf +application/reginfo+xml rif +application/relax-ng-compact-syntax rnc +application/remote-printing +application/reputon+json +application/resource-lists-diff+xml rld +application/resource-lists+xml rl +application/rfc+xml rfcxml +application/riscos +application/rlmi+xml +application/rls-services+xml rs +application/rpki-ghostbusters gbr +application/rpki-manifest mft +application/rpki-publication +application/rpki-roa roa +application/rpki-updown +application/rtf rtf +application/rtploopback +application/rtx +application/samlassertion+xml +application/samlmetadata+xml +application/sbml+xml +application/scaip+xml +# scm: application/vnd.lotus-screencam +application/scim+json scim +application/scvp-cv-request scq +application/scvp-cv-response scs +application/scvp-vp-request spq +application/scvp-vp-response spp +application/sdp sdp +application/sep+xml +application/sep-exi +application/session-info +application/set-payment +application/set-payment-initiation +application/set-registration +application/set-registration-initiation +application/sgml +application/sgml-open-catalog soc +application/shf+xml shf +application/sieve siv sieve +application/simple-filter+xml cl +application/simple-message-summary +application/simpleSymbolContainer +application/slate +# application/smil obsoleted by application/smil+xml +application/smil+xml smil smi sml +application/smpte336m +application/soap+fastinfoset +application/soap+xml +application/sparql-query rq +application/sparql-results+xml srx +application/spirits-event+xml +application/sql sql +application/srgs gram +application/srgs+xml grxml +application/sru+xml sru +application/ssml+xml ssml +application/tamp-apex-update tau +application/tamp-apex-update-confirm auc +application/tamp-community-update tcu +application/tamp-community-update-confirm cuc +application/tamp-error ter +application/tamp-sequence-adjust tsa +application/tamp-sequence-adjust-confirm sac +# tsq: application/timestamp-query +application/tamp-status-query +# tsr: application/timestamp-reply +application/tamp-status-response +application/tamp-update tur +application/tamp-update-confirm tuc +application/tei+xml tei teiCorpus odd +application/thraud+xml tfi +application/timestamp-query tsq +application/timestamp-reply tsr +application/timestamped-data tsd +application/trig trig +application/ttml+xml ttml +application/tve-trigger +application/ulpfec +application/urc-grpsheet+xml gsheet +application/urc-ressheet+xml rsheet +application/urc-targetdesc+xml td +application/urc-uisocketdesc+xml uis +application/vcard+json +application/vcard+xml +application/vemmi +application/vnd.3gpp.access-transfer-events+xml +application/vnd.3gpp.bsf+xml +application/vnd.3gpp.mid-call+xml +application/vnd.3gpp.pic-bw-large plb +application/vnd.3gpp.pic-bw-small psb +application/vnd.3gpp.pic-bw-var pvb +application/vnd.3gpp-prose+xml +application/vnd.3gpp-prose-pc3ch+xml +# sms: application/vnd.3gpp2.sms +application/vnd.3gpp.sms +application/vnd.3gpp.sms+xml +application/vnd.3gpp.srvcc-ext+xml +application/vnd.3gpp.SRVCC-info+xml +application/vnd.3gpp.state-and-event-info+xml +application/vnd.3gpp.ussd+xml +application/vnd.3gpp2.bcmcsinfo+xml 
+application/vnd.3gpp2.sms sms +application/vnd.3gpp2.tcap tcap +application/vnd.3lightssoftware.imagescal imgcal +application/vnd.3M.Post-it-Notes pwn +application/vnd.accpac.simply.aso aso +application/vnd.accpac.simply.imp imp +application/vnd.acucobol acu +application/vnd.acucorp atc acutc +application/vnd.adobe.flash.movie swf +application/vnd.adobe.formscentral.fcdt fcdt +application/vnd.adobe.fxp fxp fxpl +application/vnd.adobe.partial-upload +application/vnd.adobe.xdp+xml xdp +application/vnd.adobe.xfdf xfdf +application/vnd.aether.imp +application/vnd.ah-barcode +application/vnd.ahead.space ahead +application/vnd.airzip.filesecure.azf azf +application/vnd.airzip.filesecure.azs azs +application/vnd.amazon.mobi8-ebook azw3 +application/vnd.americandynamics.acc acc +application/vnd.amiga.ami ami +application/vnd.amundsen.maze+xml +application/vnd.anki apkg +application/vnd.anser-web-certificate-issue-initiation cii +# Not in IANA listing, but is on FTP site? +application/vnd.anser-web-funds-transfer-initiation fti +# atx: audio/ATRAC-X +application/vnd.antix.game-component +application/vnd.apache.thrift.binary +application/vnd.apache.thrift.compact +application/vnd.apache.thrift.json +application/vnd.api+json +application/vnd.apothekende.reservation+json +application/vnd.apple.installer+xml dist distz pkg mpkg +# m3u: audio/x-mpegurl for now +application/vnd.apple.mpegurl m3u8 +# application/vnd.arastra.swi obsoleted by application/vnd.aristanetworks.swi +application/vnd.aristanetworks.swi swi +application/vnd.artsquare +application/vnd.astraea-software.iota iota +application/vnd.audiograph aep +application/vnd.autopackage package +application/vnd.avistar+xml +application/vnd.balsamiq.bmml+xml bmml +application/vnd.balsamiq.bmpr bmpr +application/vnd.bekitzur-stech+json +application/vnd.bint.med-content +application/vnd.biopax.rdf+xml +application/vnd.blueice.multipass mpm +application/vnd.bluetooth.ep.oob ep +application/vnd.bluetooth.le.oob le +application/vnd.bmi bmi +application/vnd.businessobjects rep +application/vnd.cab-jscript +application/vnd.canon-cpdl +application/vnd.canon-lips +application/vnd.capasystems-pg+json +application/vnd.cendio.thinlinc.clientconf tlclient +application/vnd.century-systems.tcp_stream +application/vnd.chemdraw+xml cdxml +application/vnd.chess-pgn pgn +application/vnd.chipnuts.karaoke-mmd mmd +application/vnd.cinderella cdy +application/vnd.cirpack.isdn-ext +application/vnd.citationstyles.style+xml csl +application/vnd.claymore cla +application/vnd.cloanto.rp9 rp9 +application/vnd.clonk.c4group c4g c4d c4f c4p c4u +application/vnd.cluetrust.cartomobile-config c11amc +application/vnd.cluetrust.cartomobile-config-pkg c11amz +application/vnd.coffeescript coffee +application/vnd.collection+json +application/vnd.collection.doc+json +application/vnd.collection.next+json +application/vnd.comicbook+zip cbz +# icc: application/vnd.iccprofile +application/vnd.commerce-battelle ica icf icd ic0 ic1 ic2 ic3 ic4 ic5 ic6 ic7 ic8 +application/vnd.commonspace csp cst +application/vnd.contact.cmsg cdbcmsg +application/vnd.coreos.ignition+json ign ignition +application/vnd.cosmocaller cmc +application/vnd.crick.clicker clkx +application/vnd.crick.clicker.keyboard clkk +application/vnd.crick.clicker.palette clkp +application/vnd.crick.clicker.template clkt +application/vnd.crick.clicker.wordbank clkw +application/vnd.criticaltools.wbs+xml wbs +application/vnd.ctc-posml pml +application/vnd.ctct.ws+xml +application/vnd.cups-pdf +application/vnd.cups-postscript 
+application/vnd.cups-ppd ppd +application/vnd.cups-raster +application/vnd.cups-raw +application/vnd.curl curl +application/vnd.cyan.dean.root+xml +application/vnd.cybank +application/vnd.d2l.coursepackage1p0+zip +application/vnd.dart dart +application/vnd.data-vision.rdz rdz +application/vnd.datapackage+json +application/vnd.dataresource+json +application/vnd.debian.binary-package deb udeb +application/vnd.dece.data uvf uvvf uvd uvvd +application/vnd.dece.ttml+xml uvt uvvt +application/vnd.dece.unspecified uvx uvvx +application/vnd.dece.zip uvz uvvz +application/vnd.denovo.fcselayout-link fe_launch +application/vnd.desmume.movie dsm +application/vnd.dir-bi.plate-dl-nosuffix +application/vnd.dm.delegation+xml +application/vnd.dna dna +application/vnd.document+json docjson +application/vnd.dolby.mobile.1 +application/vnd.dolby.mobile.2 +application/vnd.doremir.scorecloud-binary-document scld +application/vnd.dpgraph dpg mwc dpgraph +application/vnd.dreamfactory dfac +application/vnd.drive+json +application/vnd.dtg.local +application/vnd.dtg.local.flash fla +application/vnd.dtg.local.html +application/vnd.dvb.ait ait +# class: application/octet-stream +application/vnd.dvb.dvbj +application/vnd.dvb.esgcontainer +application/vnd.dvb.ipdcdftnotifaccess +application/vnd.dvb.ipdcesgaccess +application/vnd.dvb.ipdcesgaccess2 +application/vnd.dvb.ipdcesgpdd +application/vnd.dvb.ipdcroaming +application/vnd.dvb.iptv.alfec-base +application/vnd.dvb.iptv.alfec-enhancement +application/vnd.dvb.notif-aggregate-root+xml +application/vnd.dvb.notif-container+xml +application/vnd.dvb.notif-generic+xml +application/vnd.dvb.notif-ia-msglist+xml +application/vnd.dvb.notif-ia-registration-request+xml +application/vnd.dvb.notif-ia-registration-response+xml +application/vnd.dvb.notif-init+xml +# pfr: application/font-tdpfr +application/vnd.dvb.pfr +application/vnd.dvb.service svc +# dxr: application/x-director +application/vnd.dxr +application/vnd.dynageo geo +application/vnd.dzr dzr +application/vnd.easykaraoke.cdgdownload +application/vnd.ecdis-update +application/vnd.ecowin.chart mag +application/vnd.ecowin.filerequest +application/vnd.ecowin.fileupdate +application/vnd.ecowin.series +application/vnd.ecowin.seriesrequest +application/vnd.ecowin.seriesupdate +# img: application/octet-stream +application/vnd.efi-img +# iso: application/octet-stream +application/vnd.efi-iso +application/vnd.enliven nml +application/vnd.enphase.envoy +application/vnd.eprints.data+xml +application/vnd.epson.esf esf +application/vnd.epson.msf msf +application/vnd.epson.quickanime qam +application/vnd.epson.salt slt +application/vnd.epson.ssf ssf +application/vnd.ericsson.quickcall qcall qca +application/vnd.espass-espass+zip espass +application/vnd.eszigno3+xml es3 et3 +application/vnd.etsi.aoc+xml +application/vnd.etsi.asic-e+zip asice sce +# scs: application/scvp-cv-response +application/vnd.etsi.asic-s+zip asics +application/vnd.etsi.cug+xml +application/vnd.etsi.iptvcommand+xml +application/vnd.etsi.iptvdiscovery+xml +application/vnd.etsi.iptvprofile+xml +application/vnd.etsi.iptvsad-bc+xml +application/vnd.etsi.iptvsad-cod+xml +application/vnd.etsi.iptvsad-npvr+xml +application/vnd.etsi.iptvservice+xml +application/vnd.etsi.iptvsync+xml +application/vnd.etsi.iptvueprofile+xml +application/vnd.etsi.mcid+xml +application/vnd.etsi.mheg5 +application/vnd.etsi.overload-control-policy-dataset+xml +application/vnd.etsi.pstn+xml +application/vnd.etsi.sci+xml +application/vnd.etsi.simservs+xml +application/vnd.etsi.timestamp-token tst 
+application/vnd.etsi.tsl.der +application/vnd.etsi.tsl+xml +application/vnd.eudora.data +application/vnd.ezpix-album ez2 +application/vnd.ezpix-package ez3 +application/vnd.f-secure.mobile +application/vnd.fastcopy-disk-image dim +application/vnd.fdf fdf +application/vnd.fdsn.mseed msd mseed +application/vnd.fdsn.seed seed dataless +application/vnd.ffsns +application/vnd.filmit.zfc zfc +# all extensions: application/vnd.hbci +application/vnd.fints +application/vnd.firemonkeys.cloudcell +application/vnd.FloGraphIt gph +application/vnd.fluxtime.clip ftc +application/vnd.font-fontforge-sfd sfd +application/vnd.framemaker fm +application/vnd.frogans.fnc fnc +application/vnd.frogans.ltf ltf +application/vnd.fsc.weblaunch fsc +application/vnd.fujitsu.oasys oas +application/vnd.fujitsu.oasys2 oa2 +application/vnd.fujitsu.oasys3 oa3 +application/vnd.fujitsu.oasysgp fg5 +application/vnd.fujitsu.oasysprs bh2 +application/vnd.fujixerox.ART-EX +application/vnd.fujixerox.ART4 +application/vnd.fujixerox.ddd ddd +application/vnd.fujixerox.docuworks xdw +application/vnd.fujixerox.docuworks.binder xbd +application/vnd.fujixerox.docuworks.container xct +application/vnd.fujixerox.HBPL +application/vnd.fut-misnet +application/vnd.fuzzysheet fzs +application/vnd.genomatix.tuxedo txd +# application/vnd.geo+json obsoleted by application/geo+json +application/vnd.geocube+xml g3 g³ +application/vnd.geogebra.file ggb +application/vnd.geogebra.tool ggt +application/vnd.geometry-explorer gex gre +application/vnd.geonext gxt +application/vnd.geoplan g2w +application/vnd.geospace g3w +# gbr: application/rpki-ghostbusters +application/vnd.gerber +application/vnd.globalplatform.card-content-mgt +application/vnd.globalplatform.card-content-mgt-response +application/vnd.gmx gmx +application/vnd.google-earth.kml+xml kml +application/vnd.google-earth.kmz kmz +application/vnd.gov.sk.e-form+xml +application/vnd.gov.sk.e-form+zip +application/vnd.gov.sk.xmldatacontainer+xml +application/vnd.grafeq gqf gqs +application/vnd.gridmp +application/vnd.groove-account gac +application/vnd.groove-help ghf +application/vnd.groove-identity-message gim +application/vnd.groove-injector grv +application/vnd.groove-tool-message gtm +application/vnd.groove-tool-template tpl +application/vnd.groove-vcard vcg +application/vnd.hal+json +application/vnd.hal+xml hal +application/vnd.HandHeld-Entertainment+xml zmm +application/vnd.hbci hbci hbc kom upa pkd bpd +application/vnd.hc+json +# rep: application/vnd.businessobjects +application/vnd.hcl-bireports +application/vnd.hdt hdt +application/vnd.heroku+json +application/vnd.hhe.lesson-player les +application/vnd.hp-HPGL hpgl +application/vnd.hp-hpid hpi hpid +application/vnd.hp-hps hps +application/vnd.hp-jlyt jlt +application/vnd.hp-PCL pcl +application/vnd.hp-PCLXL +application/vnd.httphone +application/vnd.hydrostatix.sof-data sfd-hdstx +application/vnd.hyperdrive+json +application/vnd.hzn-3d-crossword x3d +application/vnd.ibm.afplinedata +application/vnd.ibm.electronic-media emm +application/vnd.ibm.MiniPay mpy +application/vnd.ibm.modcap list3820 listafp afp pseg3820 +application/vnd.ibm.rights-management irm +application/vnd.ibm.secure-container sc +application/vnd.iccprofile icc icm +application/vnd.ieee.1905 1905.1 +application/vnd.igloader igl +application/vnd.imagemeter.folder+zip imf +application/vnd.imagemeter.image+zip imi +application/vnd.immervision-ivp ivp +application/vnd.immervision-ivu ivu +application/vnd.ims.imsccv1p1 imscc +application/vnd.ims.imsccv1p2 
+application/vnd.ims.imsccv1p3 +application/vnd.ims.lis.v2.result+json +application/vnd.ims.lti.v2.toolconsumerprofile+json +application/vnd.ims.lti.v2.toolproxy.id+json +application/vnd.ims.lti.v2.toolproxy+json +application/vnd.ims.lti.v2.toolsettings+json +application/vnd.ims.lti.v2.toolsettings.simple+json +application/vnd.informedcontrol.rms+xml +# application/vnd.informix-visionary obsoleted by application/vnd.visionary +application/vnd.infotech.project +application/vnd.infotech.project+xml +application/vnd.innopath.wamp.notification +application/vnd.insors.igm igm +application/vnd.intercon.formnet xpw xpx +application/vnd.intergeo i2g +application/vnd.intertrust.digibox +application/vnd.intertrust.nncp +application/vnd.intu.qbo qbo +application/vnd.intu.qfx qfx +application/vnd.iptc.g2.catalogitem+xml +application/vnd.iptc.g2.conceptitem+xml +application/vnd.iptc.g2.knowledgeitem+xml +application/vnd.iptc.g2.newsitem+xml +application/vnd.iptc.g2.newsmessage+xml +application/vnd.iptc.g2.packageitem+xml +application/vnd.iptc.g2.planningitem+xml +application/vnd.ipunplugged.rcprofile rcprofile +application/vnd.irepository.package+xml irp +application/vnd.is-xpr xpr +application/vnd.isac.fcs fcs +application/vnd.jam jam +application/vnd.japannet-directory-service +application/vnd.japannet-jpnstore-wakeup +application/vnd.japannet-payment-wakeup +application/vnd.japannet-registration +application/vnd.japannet-registration-wakeup +application/vnd.japannet-setstore-wakeup +application/vnd.japannet-verification +application/vnd.japannet-verification-wakeup +application/vnd.jcp.javame.midlet-rms rms +application/vnd.jisp jisp +application/vnd.joost.joda-archive joda +application/vnd.jsk.isdn-ngn +application/vnd.kahootz ktz ktr +application/vnd.kde.karbon karbon +application/vnd.kde.kchart chrt +application/vnd.kde.kformula kfo +application/vnd.kde.kivio flw +application/vnd.kde.kontour kon +application/vnd.kde.kpresenter kpr kpt +application/vnd.kde.kspread ksp +application/vnd.kde.kword kwd kwt +application/vnd.kenameaapp htke +application/vnd.kidspiration kia +application/vnd.Kinar kne knp sdf +application/vnd.koan skp skd skm skt +application/vnd.kodak-descriptor sse +application/vnd.las.las+json lasjson +application/vnd.las.las+xml lasxml +application/vnd.liberty-request+xml +application/vnd.llamagraphics.life-balance.desktop lbd +application/vnd.llamagraphics.life-balance.exchange+xml lbe +application/vnd.lotus-1-2-3 123 wk4 wk3 wk1 +application/vnd.lotus-approach apr vew +application/vnd.lotus-freelance prz pre +application/vnd.lotus-notes nsf ntf ndl ns4 ns3 ns2 nsh nsg +application/vnd.lotus-organizer or3 or2 org +application/vnd.lotus-screencam scm +application/vnd.lotus-wordpro lwp sam +application/vnd.macports.portpkg portpkg +application/vnd.mapbox-vector-tile mvt +application/vnd.marlin.drm.actiontoken+xml +application/vnd.marlin.drm.conftoken+xml +application/vnd.marlin.drm.license+xml +application/vnd.marlin.drm.mdcf mdc +application/vnd.mason+json +application/vnd.maxmind.maxmind-db mmdb +application/vnd.mcd mcd +application/vnd.medcalcdata mc1 +application/vnd.mediastation.cdkey cdkey +application/vnd.meridian-slingshot +application/vnd.MFER mwf +application/vnd.mfmp mfm +application/vnd.micro+json +application/vnd.micrografx.flo flo +application/vnd.micrografx.igx igx +application/vnd.microsoft.portable-executable +application/vnd.microsoft.windows.thumbnail-cache +application/vnd.miele+json +application/vnd.mif mif +application/vnd.minisoft-hp3000-save 
+application/vnd.mitsubishi.misty-guard.trustweb +application/vnd.Mobius.DAF daf +application/vnd.Mobius.DIS dis +application/vnd.Mobius.MBK mbk +application/vnd.Mobius.MQY mqy +application/vnd.Mobius.MSL msl +application/vnd.Mobius.PLC plc +application/vnd.Mobius.TXF txf +application/vnd.mophun.application mpn +application/vnd.mophun.certificate mpc +application/vnd.motorola.flexsuite +application/vnd.motorola.flexsuite.adsi +application/vnd.motorola.flexsuite.fis +application/vnd.motorola.flexsuite.gotap +application/vnd.motorola.flexsuite.kmr +application/vnd.motorola.flexsuite.ttc +application/vnd.motorola.flexsuite.wem +application/vnd.motorola.iprm +application/vnd.mozilla.xul+xml xul +application/vnd.ms-3mfdocument 3mf +application/vnd.ms-artgalry cil +application/vnd.ms-asf asf +application/vnd.ms-cab-compressed cab +application/vnd.ms-excel xls xlm xla xlc xlt xlw +application/vnd.ms-excel.template.macroEnabled.12 xltm +application/vnd.ms-excel.addin.macroEnabled.12 xlam +application/vnd.ms-excel.sheet.binary.macroEnabled.12 xlsb +application/vnd.ms-excel.sheet.macroEnabled.12 xlsm +application/vnd.ms-fontobject eot +application/vnd.ms-htmlhelp chm +application/vnd.ms-ims ims +application/vnd.ms-lrm lrm +application/vnd.ms-office.activeX+xml +application/vnd.ms-officetheme thmx +application/vnd.ms-playready.initiator+xml +application/vnd.ms-powerpoint ppt pps pot +application/vnd.ms-powerpoint.addin.macroEnabled.12 ppam +application/vnd.ms-powerpoint.presentation.macroEnabled.12 pptm +application/vnd.ms-powerpoint.slide.macroEnabled.12 sldm +application/vnd.ms-powerpoint.slideshow.macroEnabled.12 ppsm +application/vnd.ms-powerpoint.template.macroEnabled.12 potm +application/vnd.ms-PrintDeviceCapabilities+xml +application/vnd.ms-PrintSchemaTicket+xml +application/vnd.ms-project mpp mpt +application/vnd.ms-tnef tnef tnf +application/vnd.ms-windows.devicepairing +application/vnd.ms-windows.nwprinting.oob +application/vnd.ms-windows.printerpairing +application/vnd.ms-windows.wsd.oob +application/vnd.ms-wmdrm.lic-chlg-req +application/vnd.ms-wmdrm.lic-resp +application/vnd.ms-wmdrm.meter-chlg-req +application/vnd.ms-wmdrm.meter-resp +application/vnd.ms-word.document.macroEnabled.12 docm +application/vnd.ms-word.template.macroEnabled.12 dotm +application/vnd.ms-works wcm wdb wks wps +application/vnd.ms-wpl wpl +application/vnd.ms-xpsdocument xps +application/vnd.msa-disk-image msa +application/vnd.mseq mseq +application/vnd.msign +application/vnd.multiad.creator crtr +application/vnd.multiad.creator.cif cif +application/vnd.music-niff +application/vnd.musician mus +application/vnd.muvee.style msty +application/vnd.mynfc taglet +application/vnd.ncd.control +application/vnd.ncd.reference +application/vnd.nearst.inv+json +application/vnd.nervana entity request bkm kcm +application/vnd.netfpx +# ntf: application/vnd.lotus-notes +application/vnd.nitf nitf +application/vnd.neurolanguage.nlu nlu +application/vnd.nintendo.nitro.rom nds +application/vnd.nintendo.snes.rom sfc smc +application/vnd.noblenet-directory nnd +application/vnd.noblenet-sealer nns +application/vnd.noblenet-web nnw +application/vnd.nokia.catalogs +application/vnd.nokia.conml+wbxml +application/vnd.nokia.conml+xml +application/vnd.nokia.iptv.config+xml +application/vnd.nokia.iSDS-radio-presets +application/vnd.nokia.landmark+wbxml +application/vnd.nokia.landmark+xml +application/vnd.nokia.landmarkcollection+xml +application/vnd.nokia.n-gage.ac+xml ac +application/vnd.nokia.n-gage.data ngdat 
+application/vnd.nokia.n-gage.symbian.install n-gage +application/vnd.nokia.ncd +application/vnd.nokia.pcd+wbxml +application/vnd.nokia.pcd+xml +application/vnd.nokia.radio-preset rpst +application/vnd.nokia.radio-presets rpss +application/vnd.novadigm.EDM edm +application/vnd.novadigm.EDX edx +application/vnd.novadigm.EXT ext +application/vnd.ntt-local.content-share +application/vnd.ntt-local.file-transfer +application/vnd.ntt-local.ogw_remote-access +application/vnd.ntt-local.sip-ta_remote +application/vnd.ntt-local.sip-ta_tcp_stream +application/vnd.oasis.opendocument.chart odc +application/vnd.oasis.opendocument.chart-template otc +application/vnd.oasis.opendocument.database odb +application/vnd.oasis.opendocument.formula odf +# otf: font/otf +application/vnd.oasis.opendocument.formula-template +application/vnd.oasis.opendocument.graphics odg +application/vnd.oasis.opendocument.graphics-template otg +application/vnd.oasis.opendocument.image odi +application/vnd.oasis.opendocument.image-template oti +application/vnd.oasis.opendocument.presentation odp +application/vnd.oasis.opendocument.presentation-template otp +application/vnd.oasis.opendocument.spreadsheet ods +application/vnd.oasis.opendocument.spreadsheet-template ots +application/vnd.oasis.opendocument.text odt +application/vnd.oasis.opendocument.text-master odm +application/vnd.oasis.opendocument.text-template ott +application/vnd.oasis.opendocument.text-web oth +application/vnd.obn +application/vnd.ocf+cbor +application/vnd.oftn.l10n+json +application/vnd.oipf.contentaccessdownload+xml +application/vnd.oipf.contentaccessstreaming+xml +application/vnd.oipf.cspg-hexbinary +application/vnd.oipf.dae.svg+xml +application/vnd.oipf.dae.xhtml+xml +application/vnd.oipf.mippvcontrolmessage+xml +application/vnd.oipf.pae.gem +application/vnd.oipf.spdiscovery+xml +application/vnd.oipf.spdlist+xml +application/vnd.oipf.ueprofile+xml +application/vnd.olpc-sugar xo +application/vnd.oma.bcast.associated-procedure-parameter+xml +application/vnd.oma.bcast.drm-trigger+xml +application/vnd.oma.bcast.imd+xml +application/vnd.oma.bcast.ltkm +application/vnd.oma.bcast.notification+xml +application/vnd.oma.bcast.provisioningtrigger +application/vnd.oma.bcast.sgboot +application/vnd.oma.bcast.sgdd+xml +application/vnd.oma.bcast.sgdu +application/vnd.oma.bcast.simple-symbol-container +application/vnd.oma.bcast.smartcard-trigger+xml +application/vnd.oma.bcast.sprov+xml +application/vnd.oma.bcast.stkm +application/vnd.oma.cab-address-book+xml +application/vnd.oma.cab-feature-handler+xml +application/vnd.oma.cab-pcc+xml +application/vnd.oma.cab-subs-invite+xml +application/vnd.oma.cab-user-prefs+xml +application/vnd.oma.dcd +application/vnd.oma.dcdc +application/vnd.oma.dd2+xml dd2 +application/vnd.oma.drm.risd+xml +application/vnd.oma.group-usage-list+xml +application/vnd.oma.lwm2m+json +application/vnd.oma.lwm2m+tlv +application/vnd.oma.pal+xml +application/vnd.oma.poc.detailed-progress-report+xml +application/vnd.oma.poc.final-report+xml +application/vnd.oma.poc.groups+xml +application/vnd.oma.poc.invocation-descriptor+xml +application/vnd.oma.poc.optimized-progress-report+xml +application/vnd.oma.push +application/vnd.oma.scidm.messages+xml +application/vnd.oma.xcap-directory+xml +application/vnd.oma-scws-config +application/vnd.oma-scws-http-request +application/vnd.oma-scws-http-response +application/vnd.omads-email+xml +application/vnd.omads-file+xml +application/vnd.omads-folder+xml +application/vnd.omaloc-supl-init +application/vnd.onepager tam 
+application/vnd.onepagertamp tamp +application/vnd.onepagertamx tamx +application/vnd.onepagertat tat +application/vnd.onepagertatp tatp +application/vnd.onepagertatx tatx +application/vnd.openblox.game+xml obgx +application/vnd.openblox.game-binary obg +application/vnd.openeye.oeb oeb +application/vnd.openofficeorg.extension oxt +application/vnd.openstreetmap.data+xml osm +application/vnd.openxmlformats-officedocument.custom-properties+xml +application/vnd.openxmlformats-officedocument.customXmlProperties+xml +application/vnd.openxmlformats-officedocument.drawing+xml +application/vnd.openxmlformats-officedocument.drawingml.chart+xml +application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramColors+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramLayout+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramStyle+xml +application/vnd.openxmlformats-officedocument.extended-properties+xml +application/vnd.openxmlformats-officedocument.presentationml.commentAuthors+xml +application/vnd.openxmlformats-officedocument.presentationml.comments+xml +application/vnd.openxmlformats-officedocument.presentationml.handoutMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.notesMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.notesSlide+xml +application/vnd.openxmlformats-officedocument.presentationml.presProps+xml +application/vnd.openxmlformats-officedocument.presentationml.presentation pptx +application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml +application/vnd.openxmlformats-officedocument.presentationml.slide sldx +application/vnd.openxmlformats-officedocument.presentationml.slide+xml +application/vnd.openxmlformats-officedocument.presentationml.slideLayout+xml +application/vnd.openxmlformats-officedocument.presentationml.slideMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.slideUpdateInfo+xml +application/vnd.openxmlformats-officedocument.presentationml.slideshow ppsx +application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml +application/vnd.openxmlformats-officedocument.presentationml.tableStyles+xml +application/vnd.openxmlformats-officedocument.presentationml.tags+xml +application/vnd.openxmlformats-officedocument.presentationml.template potx +application/vnd.openxmlformats-officedocument.presentationml.template.main+xml +application/vnd.openxmlformats-officedocument.presentationml.viewProps+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.externalLink+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheDefinition+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheRecords+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotTable+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.queryTable+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.revisionHeaders+xml 
+application/vnd.openxmlformats-officedocument.spreadsheetml.revisionLog+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx +application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sheetMetadata+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.tableSingleCells+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.template xltx +application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.userNames+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.volatileDependencies+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml +application/vnd.openxmlformats-officedocument.theme+xml +application/vnd.openxmlformats-officedocument.themeOverride+xml +application/vnd.openxmlformats-officedocument.vmlDrawing +application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.document docx +application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.template dotx +application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml +application/vnd.openxmlformats-package.core-properties+xml +application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml +application/vnd.openxmlformats-package.relationships+xml +application/vnd.oracle.resource+json +application/vnd.orange.indata +application/vnd.osa.netdeploy ndc +application/vnd.osgeo.mapguide.package mgp +# jar: application/x-java-archive +application/vnd.osgi.bundle +application/vnd.osgi.dp dp +application/vnd.osgi.subsystem esa +application/vnd.otps.ct-kip+xml +application/vnd.oxli.countgraph oxlicg +application/vnd.pagerduty+json +application/vnd.palm prc pdb pqa oprc +application/vnd.panoply plp +application/vnd.paos+xml +application/vnd.pawaafile paw +application/vnd.pcos +application/vnd.pg.format str +application/vnd.pg.osasli ei6 +application/vnd.piaccess.application-license pil +application/vnd.picsel efif +application/vnd.pmi.widget wg +application/vnd.poc.group-advertisement+xml +application/vnd.pocketlearn plf +application/vnd.powerbuilder6 pbd +application/vnd.powerbuilder6-s +application/vnd.powerbuilder7 +application/vnd.powerbuilder7-s +application/vnd.powerbuilder75 +application/vnd.powerbuilder75-s +application/vnd.preminet preminet +application/vnd.previewsystems.box box vbox 
+application/vnd.proteus.magazine mgz +application/vnd.publishare-delta-tree qps +# pti: image/prs.pti +application/vnd.pvi.ptid1 ptid +application/vnd.pwg-multiplexed +application/vnd.pwg-xhtml-print+xml +application/vnd.qualcomm.brew-app-res bar +application/vnd.quarantainenet +application/vnd.Quark.QuarkXPress qxd qxt qwd qwt qxl qxb +application/vnd.quobject-quoxdocument quox quiz +application/vnd.radisys.moml+xml +application/vnd.radisys.msml-audit-conf+xml +application/vnd.radisys.msml-audit-conn+xml +application/vnd.radisys.msml-audit-dialog+xml +application/vnd.radisys.msml-audit-stream+xml +application/vnd.radisys.msml-audit+xml +application/vnd.radisys.msml-conf+xml +application/vnd.radisys.msml-dialog-base+xml +application/vnd.radisys.msml-dialog-fax-detect+xml +application/vnd.radisys.msml-dialog-fax-sendrecv+xml +application/vnd.radisys.msml-dialog-group+xml +application/vnd.radisys.msml-dialog-speech+xml +application/vnd.radisys.msml-dialog-transform+xml +application/vnd.radisys.msml-dialog+xml +application/vnd.radisys.msml+xml +application/vnd.rainstor.data tree +application/vnd.rapid +application/vnd.rar rar +application/vnd.realvnc.bed bed +application/vnd.recordare.musicxml mxl +application/vnd.recordare.musicxml+xml +application/vnd.RenLearn.rlprint +application/vnd.rig.cryptonote cryptonote +application/vnd.route66.link66+xml link66 +# gbr: application/rpki-ghostbusters +application/vnd.rs-274x +application/vnd.ruckus.download +application/vnd.s3sms +application/vnd.sailingtracker.track st +application/vnd.sbm.cid +application/vnd.sbm.mid2 +application/vnd.scribus scd sla slaz +application/vnd.sealed.3df s3df +application/vnd.sealed.csf scsf +application/vnd.sealed.doc sdoc sdo s1w +application/vnd.sealed.eml seml sem +application/vnd.sealed.mht smht smh +application/vnd.sealed.net +# spp: application/scvp-vp-response +application/vnd.sealed.ppt sppt s1p +application/vnd.sealed.tiff stif +application/vnd.sealed.xls sxls sxl s1e +# stm: audio/x-stm +application/vnd.sealedmedia.softseal.html stml s1h +application/vnd.sealedmedia.softseal.pdf spdf spd s1a +application/vnd.seemail see +application/vnd.sema sema +application/vnd.semd semd +application/vnd.semf semf +application/vnd.shana.informed.formdata ifm +application/vnd.shana.informed.formtemplate itp +application/vnd.shana.informed.interchange iif +application/vnd.shana.informed.package ipk +application/vnd.SimTech-MindMapper twd twds +application/vnd.siren+json +application/vnd.smaf mmf +application/vnd.smart.notebook notebook +application/vnd.smart.teacher teacher +application/vnd.software602.filler.form+xml fo +application/vnd.software602.filler.form-xml-zip zfo +application/vnd.solent.sdkm+xml sdkm sdkd +application/vnd.spotfire.dxp dxp +application/vnd.spotfire.sfs sfs +application/vnd.sss-cod +application/vnd.sss-dtf +application/vnd.sss-ntf +application/vnd.stepmania.package smzip +application/vnd.stepmania.stepchart sm +application/vnd.street-stream +application/vnd.sun.wadl+xml wadl +application/vnd.sus-calendar sus susp +application/vnd.svd +application/vnd.swiftview-ics +application/vnd.syncml+xml xsm +application/vnd.syncml.dm+wbxml bdm +application/vnd.syncml.dm+xml xdm +application/vnd.syncml.dm.notification +application/vnd.syncml.dmddf+wbxml +application/vnd.syncml.dmddf+xml ddf +application/vnd.syncml.dmtnds+wbxml +application/vnd.syncml.dmtnds+xml +application/vnd.syncml.ds.notification +application/vnd.tableschema+json +application/vnd.tao.intent-module-archive tao +application/vnd.tcpdump.pcap pcap 
cap dmp +application/vnd.theqvd qvd +application/vnd.tmd.mediaflex.api+xml +application/vnd.tml vfr viaframe +application/vnd.tmobile-livetv tmo +application/vnd.tri.onesource +application/vnd.trid.tpt tpt +application/vnd.triscape.mxs mxs +application/vnd.trueapp tra +application/vnd.truedoc +# cab: application/vnd.ms-cab-compressed +application/vnd.ubisoft.webplayer +application/vnd.ufdl ufdl ufd frm +application/vnd.uiq.theme utz +application/vnd.umajin umj +application/vnd.unity unityweb +application/vnd.uoml+xml uoml uo +application/vnd.uplanet.alert +application/vnd.uplanet.alert-wbxml +application/vnd.uplanet.bearer-choice +application/vnd.uplanet.bearer-choice-wbxml +application/vnd.uplanet.cacheop +application/vnd.uplanet.cacheop-wbxml +application/vnd.uplanet.channel +application/vnd.uplanet.channel-wbxml +application/vnd.uplanet.list +application/vnd.uplanet.list-wbxml +application/vnd.uplanet.listcmd +application/vnd.uplanet.listcmd-wbxml +application/vnd.uplanet.signal +application/vnd.uri-map urim urimap +application/vnd.valve.source.material vmt +application/vnd.vcx vcx +# sxi: application/vnd.sun.xml.impress +application/vnd.vd-study mxi study-inter model-inter +# mcd: application/vnd.mcd +application/vnd.vectorworks vwx +application/vnd.vel+json +application/vnd.verimatrix.vcas +application/vnd.vidsoft.vidconference vsc +application/vnd.visio vsd vst vsw vss +application/vnd.visionary vis +# vsc: application/vnd.vidsoft.vidconference +application/vnd.vividence.scriptfile +application/vnd.vsf vsf +application/vnd.wap.sic sic +application/vnd.wap.slc slc +application/vnd.wap.wbxml wbxml +application/vnd.wap.wmlc wmlc +application/vnd.wap.wmlscriptc wmlsc +application/vnd.webturbo wtb +application/vnd.wfa.p2p p2p +application/vnd.wfa.wsc wsc +application/vnd.windows.devicepairing +application/vnd.wmc wmc +application/vnd.wmf.bootstrap +# nb: application/mathematica for now +application/vnd.wolfram.mathematica +application/vnd.wolfram.mathematica.package m +application/vnd.wolfram.player nbp +application/vnd.wordperfect wpd +application/vnd.wqd wqd +application/vnd.wrq-hp3000-labelled +application/vnd.wt.stf stf +application/vnd.wv.csp+xml +application/vnd.wv.csp+wbxml wv +application/vnd.wv.ssp+xml +application/vnd.xacml+json +application/vnd.xara xar +application/vnd.xfdl xfdl xfd +application/vnd.xfdl.webform +application/vnd.xmi+xml +application/vnd.xmpie.cpkg cpkg +application/vnd.xmpie.dpkg dpkg +# dpkg: application/vnd.xmpie.dpkg +application/vnd.xmpie.plan +application/vnd.xmpie.ppkg ppkg +application/vnd.xmpie.xlim xlim +application/vnd.yamaha.hv-dic hvd +application/vnd.yamaha.hv-script hvs +application/vnd.yamaha.hv-voice hvp +application/vnd.yamaha.openscoreformat osf +application/vnd.yamaha.openscoreformat.osfpvg+xml +application/vnd.yamaha.remote-setup +application/vnd.yamaha.smaf-audio saf +application/vnd.yamaha.smaf-phrase spf +application/vnd.yamaha.through-ngn +application/vnd.yamaha.tunnel-udpencap +application/vnd.yaoweme yme +application/vnd.yellowriver-custom-menu cmp +application/vnd.zul zir zirz +application/vnd.zzazz.deck+xml zaz +application/voicexml+xml vxml +application/vq-rtcp-xr +application/watcherinfo+xml wif +application/whoispp-query +application/whoispp-response +application/widget wgt +application/wita +application/wordperfect5.1 +application/wsdl+xml wsdl +application/wspolicy+xml wspolicy +# yes, this *is* IANA registered despite of x- +application/x-www-form-urlencoded +application/x400-bp +application/xacml+xml +application/xcap-att+xml 
xav +application/xcap-caps+xml xca +application/xcap-diff+xml xdf +application/xcap-el+xml xel +application/xcap-error+xml xer +application/xcap-ns+xml xns +application/xcon-conference-info-diff+xml +application/xcon-conference-info+xml +application/xenc+xml +application/xhtml+xml xhtml xhtm xht +# xml, xsd, rng: text/xml +application/xml +# mod: audio/x-mod +application/xml-dtd dtd +# ent: text/xml-external-parsed-entity +application/xml-external-parsed-entity +application/xml-patch+xml +application/xmpp+xml +application/xop+xml xop +application/xslt+xml xsl xslt +application/xv+xml mxml xhvml xvml xvm +application/yang yang +application/yang-data+json +application/yang-data+xml +application/yang-patch+json +application/yang-patch+xml +application/yin+xml yin +application/zip zip +application/zlib +audio/1d-interleaved-parityfec +audio/32kadpcm 726 +# 3gp, 3gpp: video/3gpp +audio/3gpp +# 3g2, 3gpp2: video/3gpp2 +audio/3gpp2 +audio/ac3 ac3 +audio/AMR amr +audio/AMR-WB awb +audio/amr-wb+ +audio/aptx +audio/asc acn +# aa3, omg: audio/ATRAC3 +audio/ATRAC-ADVANCED-LOSSLESS aal +# aa3, omg: audio/ATRAC3 +audio/ATRAC-X atx +audio/ATRAC3 at3 aa3 omg +audio/basic au snd +audio/BV16 +audio/BV32 +audio/clearmode +audio/CN +audio/DAT12 +audio/dls dls +audio/dsr-es201108 +audio/dsr-es202050 +audio/dsr-es202211 +audio/dsr-es202212 +audio/DV +audio/DVI4 +audio/eac3 +audio/encaprtp +audio/EVRC evc +# qcp: audio/qcelp +audio/EVRC-QCP +audio/EVRC0 +audio/EVRC1 +audio/EVRCB evb +audio/EVRCB0 +audio/EVRCB1 +audio/EVRCNW enw +audio/EVRCNW0 +audio/EVRCNW1 +audio/EVRCWB evw +audio/EVRCWB0 +audio/EVRCWB1 +audio/EVS +audio/example +audio/fwdred +audio/G711-0 +audio/G719 +audio/G722 +audio/G7221 +audio/G723 +audio/G726-16 +audio/G726-24 +audio/G726-32 +audio/G726-40 +audio/G728 +audio/G729 +audio/G7291 +audio/G729D +audio/G729E +audio/GSM +audio/GSM-EFR +audio/GSM-HR-08 +audio/iLBC lbc +audio/ip-mr_v2.5 +# wav: audio/x-wav +audio/L16 l16 +audio/L20 +audio/L24 +audio/L8 +audio/LPC +audio/MELP +audio/MELP600 +audio/MELP1200 +audio/MELP2400 +audio/mobile-xmf mxmf +# mp4, mpg4: video/mp4, see RFC 4337 +audio/mp4 m4a +audio/MP4A-LATM +audio/MPA +audio/mpa-robust +audio/mpeg mp3 mpga mp1 mp2 +audio/mpeg4-generic +audio/ogg oga ogg opus spx +audio/opus +audio/parityfec +audio/PCMA +audio/PCMA-WB +audio/PCMU +audio/PCMU-WB +audio/prs.sid sid psid +audio/qcelp qcp +audio/raptorfec +audio/RED +audio/rtp-enc-aescm128 +audio/rtp-midi +audio/rtploopback +audio/rtx +audio/SMV smv +# qcp: audio/qcelp, see RFC 3625 +audio/SMV-QCP +audio/SMV0 +# mid: audio/midi +audio/sp-midi +audio/speex +audio/t140c +audio/t38 +audio/telephone-event +audio/tone +audio/UEMCLIP +audio/ulpfec +audio/VDVI +audio/VMR-WB +audio/vnd.3gpp.iufp +audio/vnd.4SB +audio/vnd.audikoz koz +audio/vnd.CELP +audio/vnd.cisco.nse +audio/vnd.cmles.radio-events +audio/vnd.cns.anp1 +audio/vnd.cns.inf1 +audio/vnd.dece.audio uva uvva +audio/vnd.digital-winds eol +audio/vnd.dlna.adts +audio/vnd.dolby.heaac.1 +audio/vnd.dolby.heaac.2 +audio/vnd.dolby.mlp mlp +audio/vnd.dolby.mps +audio/vnd.dolby.pl2 +audio/vnd.dolby.pl2x +audio/vnd.dolby.pl2z +audio/vnd.dolby.pulse.1 +audio/vnd.dra +# wav: audio/x-wav, cpt: application/mac-compactpro +audio/vnd.dts dts +audio/vnd.dts.hd dtshd +# dvb: video/vnd.dvb.file +audio/vnd.dvb.file +audio/vnd.everad.plj plj +# rm: audio/x-pn-realaudio +audio/vnd.hns.audio +audio/vnd.lucent.voice lvp +audio/vnd.ms-playready.media.pya pya +# mxmf: audio/mobile-xmf +audio/vnd.nokia.mobile-xmf +audio/vnd.nortel.vbk vbk +audio/vnd.nuera.ecelp4800 
ecelp4800 +audio/vnd.nuera.ecelp7470 ecelp7470 +audio/vnd.nuera.ecelp9600 ecelp9600 +audio/vnd.octel.sbc +# audio/vnd.qcelp deprecated in favour of audio/qcelp +audio/vnd.rhetorex.32kadpcm +audio/vnd.rip rip +audio/vnd.sealedmedia.softseal.mpeg smp3 smp s1m +audio/vnd.vmx.cvsd +audio/vorbis +audio/vorbis-config +font/collection ttc +font/otf otf +font/sfnt +font/ttf ttf +font/woff woff +font/woff2 woff2 +image/bmp bmp dib +image/cgm cgm +image/dicom-rle drle +image/emf emf +image/example +image/fits fits fit fts +image/g3fax +image/gif gif +image/ief ief +image/jls jls +image/jp2 jp2 jpg2 +image/jpeg jpg jpeg jpe jfif +image/jpm jpm jpgm +image/jpx jpx jpf +image/ktx ktx +image/naplps +image/png png +image/prs.btif btif btf +image/prs.pti pti +image/pwg-raster +image/svg+xml svg svgz +image/t38 t38 +image/tiff tiff tif +image/tiff-fx tfx +image/vnd.adobe.photoshop psd +image/vnd.airzip.accelerator.azv azv +image/vnd.cns.inf2 +image/vnd.dece.graphic uvi uvvi uvg uvvg +image/vnd.djvu djvu djv +# sub: text/vnd.dvb.subtitle +image/vnd.dvb.subtitle +image/vnd.dwg dwg +image/vnd.dxf dxf +image/vnd.fastbidsheet fbs +image/vnd.fpx fpx +image/vnd.fst fst +image/vnd.fujixerox.edmics-mmr mmr +image/vnd.fujixerox.edmics-rlc rlc +image/vnd.globalgraphics.pgb pgb +image/vnd.microsoft.icon ico +image/vnd.mix +image/vnd.mozilla.apng apng +image/vnd.ms-modi mdi +image/vnd.net-fpx +image/vnd.radiance hdr rgbe xyze +image/vnd.sealed.png spng spn s1n +image/vnd.sealedmedia.softseal.gif sgif sgi s1g +image/vnd.sealedmedia.softseal.jpg sjpg sjp s1j +image/vnd.svf +image/vnd.tencent.tap tap +image/vnd.valve.source.texture vtf +image/vnd.wap.wbmp wbmp +image/vnd.xiff xif +image/vnd.zbrush.pcx pcx +image/wmf wmf +message/CPIM +message/delivery-status +message/disposition-notification +message/example +message/external-body +message/feedback-report +message/global u8msg +message/global-delivery-status u8dsn +message/global-disposition-notification u8mdn +message/global-headers u8hdr +message/http +# cl: application/simple-filter+xml +message/imdn+xml +# message/news obsoleted by message/rfc822 +message/partial +message/rfc822 eml mail art +message/s-http +message/sip +message/sipfrag +message/tracking-status +message/vnd.si.simp +# wsc: application/vnd.wfa.wsc +message/vnd.wfa.wsc +model/example +model/gltf+json gltf +model/iges igs iges +model/mesh msh mesh silo +model/vnd.collada+xml dae +model/vnd.dwf dwf +# 3dml, 3dm: text/vnd.in3d.3dml +model/vnd.flatland.3dml +model/vnd.gdl gdl gsm win dor lmp rsm msm ism +model/vnd.gs-gdl +model/vnd.gtw gtw +model/vnd.moml+xml moml +model/vnd.mts mts +model/vnd.opengex ogex +model/vnd.parasolid.transmit.binary x_b xmt_bin +model/vnd.parasolid.transmit.text x_t xmt_txt +model/vnd.rosette.annotated-data-model +model/vnd.valve.source.compiled-map bsp +model/vnd.vtu vtu +model/vrml wrl vrml +# x3db: model/x3d+xml +model/x3d+fastinfoset +# x3d: application/vnd.hzn-3d-crossword +model/x3d+xml x3db +model/x3d-vrml x3dv x3dvz +multipart/alternative +multipart/appledouble +multipart/byteranges +multipart/digest +multipart/encrypted +multipart/form-data +multipart/header-set +multipart/mixed +multipart/parallel +multipart/related +multipart/report +multipart/signed +multipart/vnd.bint.med-plus bmed +multipart/voice-message vpm +multipart/x-mixed-replace +text/1d-interleaved-parityfec +text/cache-manifest appcache manifest +text/calendar ics ifb +text/css css +text/csv csv +text/csv-schema csvs +text/directory +text/dns soa zone +text/encaprtp +# text/ecmascript obsoleted by 
application/ecmascript +text/enriched +text/example +text/fwdred +text/grammar-ref-list +text/html html htm +# text/javascript obsoleted by application/javascript +text/jcr-cnd cnd +text/markdown markdown md +text/mizar miz +text/n3 n3 +text/parameters +text/parityfec +text/plain txt asc text pm el c h cc hh cxx hxx f90 conf log +text/provenance-notation provn +text/prs.fallenstein.rst rst +text/prs.lines.tag tag dsc +text/prs.prop.logic +text/raptorfec +text/RED +text/rfc822-headers +text/richtext rtx +# rtf: application/rtf +text/rtf +text/rtp-enc-aescm128 +text/rtploopback +text/rtx +text/sgml sgml sgm +text/strings +text/t140 +text/tab-separated-values tsv +text/troff t tr roff +text/turtle ttl +text/ulpfec +text/uri-list uris uri +text/vcard vcf vcard +text/vnd.a a +text/vnd.abc abc +text/vnd.ascii-art ascii +# curl: application/vnd.curl +text/vnd.curl +text/vnd.debian.copyright copyright +text/vnd.DMClientScript dms +text/vnd.dvb.subtitle sub +text/vnd.esmertec.theme-descriptor jtd +text/vnd.fly fly +text/vnd.fmi.flexstor flx +text/vnd.graphviz gv dot +text/vnd.in3d.3dml 3dml 3dm +text/vnd.in3d.spot spot spo +text/vnd.IPTC.NewsML +text/vnd.IPTC.NITF +text/vnd.latex-z +text/vnd.motorola.reflex +text/vnd.ms-mediapackage mpf +text/vnd.net2phone.commcenter.command ccc +text/vnd.radisys.msml-basic-layout +text/vnd.si.uricatalogue uric +text/vnd.sun.j2me.app-descriptor jad +text/vnd.trolltech.linguist ts +text/vnd.wap.si si +text/vnd.wap.sl sl +text/vnd.wap.wml wml +text/vnd.wap.wmlscript wmls +text/xml xml xsd rng +text/xml-external-parsed-entity ent +video/1d-interleaved-parityfec +video/3gpp 3gp 3gpp +video/3gpp2 3g2 3gpp2 +video/3gpp-tt +video/BMPEG +video/BT656 +video/CelB +video/DV +video/encaprtp +video/example +video/H261 +video/H263 +video/H263-1998 +video/H263-2000 +video/H264 +video/H264-RCDO +video/H264-SVC +video/H265 +video/iso.segment m4s +video/JPEG +video/jpeg2000 +video/mj2 mj2 mjp2 +video/MP1S +video/MP2P +video/MP2T +video/mp4 mp4 mpg4 m4v +video/MP4V-ES +video/mpeg mpeg mpg mpe m1v m2v +video/mpeg4-generic +video/MPV +video/nv +video/ogg ogv +video/parityfec +video/pointer +video/quicktime mov qt +video/raptorfec +video/raw +video/rtp-enc-aescm128 +video/rtploopback +video/rtx +video/SMPTE292M +video/ulpfec +video/vc1 +video/vnd.CCTV +video/vnd.dece.hd uvh uvvh +video/vnd.dece.mobile uvm uvvm +video/vnd.dece.mp4 uvu uvvu +video/vnd.dece.pd uvp uvvp +video/vnd.dece.sd uvs uvvs +video/vnd.dece.video uvv uvvv +video/vnd.directv.mpeg +video/vnd.directv.mpeg-tts +video/vnd.dlna.mpeg-tts +video/vnd.dvb.file dvb +video/vnd.fvt fvt +# rm: audio/x-pn-realaudio +video/vnd.hns.video +video/vnd.iptvforum.1dparityfec-1010 +video/vnd.iptvforum.1dparityfec-2005 +video/vnd.iptvforum.2dparityfec-1010 +video/vnd.iptvforum.2dparityfec-2005 +video/vnd.iptvforum.ttsavc +video/vnd.iptvforum.ttsmpeg2 +video/vnd.motorola.video +video/vnd.motorola.videop +video/vnd.mpegurl mxu m4u +video/vnd.ms-playready.media.pyv pyv +video/vnd.nokia.interleaved-multimedia nim +video/vnd.nokia.videovoip +# mp4: video/mp4 +video/vnd.objectvideo +video/vnd.radgamettools.bink bik bk2 +video/vnd.radgamettools.smacker smk +video/vnd.sealed.mpeg1 smpg s11 +# smpg: video/vnd.sealed.mpeg1 +video/vnd.sealed.mpeg4 s14 +video/vnd.sealed.swf sswf ssw +video/vnd.sealedmedia.softseal.mov smov smo s1q +# uvu, uvvu: video/vnd.dece.mp4 +video/vnd.uvvu.mp4 +video/vnd.vivo viv +video/VP8 + +# Non-IANA types + +application/mac-compactpro cpt +application/metalink+xml metalink +application/owl+xml owx +application/rss+xml rss 
+application/vnd.android.package-archive apk +application/vnd.oma.dd+xml dd +application/vnd.oma.drm.content dcf +# odf: application/vnd.oasis.opendocument.formula +application/vnd.oma.drm.dcf o4a o4v +application/vnd.oma.drm.message dm +application/vnd.oma.drm.rights+wbxml drc +application/vnd.oma.drm.rights+xml dr +application/vnd.sun.xml.calc sxc +application/vnd.sun.xml.calc.template stc +application/vnd.sun.xml.draw sxd +application/vnd.sun.xml.draw.template std +application/vnd.sun.xml.impress sxi +application/vnd.sun.xml.impress.template sti +application/vnd.sun.xml.math sxm +application/vnd.sun.xml.writer sxw +application/vnd.sun.xml.writer.global sxg +application/vnd.sun.xml.writer.template stw +application/vnd.symbian.install sis +application/vnd.wap.mms-message mms +application/x-annodex anx +application/x-bcpio bcpio +application/x-bittorrent torrent +application/x-bzip2 bz2 +application/x-cdlink vcd +application/x-chrome-extension crx +application/x-cpio cpio +application/x-csh csh +application/x-director dcr dir dxr +application/x-dvi dvi +application/x-futuresplash spl +application/x-gtar gtar +application/x-hdf hdf +application/x-java-archive jar +application/x-java-jnlp-file jnlp +application/x-java-pack200 pack +application/x-killustrator kil +application/x-latex latex +application/x-netcdf nc cdf +application/x-perl pl +application/x-rpm rpm +application/x-sh sh +application/x-shar shar +application/x-stuffit sit +application/x-sv4cpio sv4cpio +application/x-sv4crc sv4crc +application/x-tar tar +application/x-tcl tcl +application/x-tex tex +application/x-texinfo texinfo texi +application/x-troff-man man 1 2 3 4 5 6 7 8 +application/x-troff-me me +application/x-troff-ms ms +application/x-ustar ustar +application/x-wais-source src +application/x-xpinstall xpi +application/x-xspf+xml xspf +application/x-xz xz +audio/midi mid midi kar +audio/x-aiff aif aiff aifc +audio/x-annodex axa +audio/x-flac flac +audio/x-matroska mka +audio/x-mod mod ult uni m15 mtm 669 med +audio/x-mpegurl m3u +audio/x-ms-wax wax +audio/x-ms-wma wma +audio/x-pn-realaudio ram rm +audio/x-realaudio ra +audio/x-s3m s3m +audio/x-stm stm +audio/x-wav wav +chemical/x-xyz xyz +image/webp webp +image/x-cmu-raster ras +image/x-portable-anymap pnm +image/x-portable-bitmap pbm +image/x-portable-graymap pgm +image/x-portable-pixmap ppm +image/x-rgb rgb +image/x-targa tga +image/x-xbitmap xbm +image/x-xpixmap xpm +image/x-xwindowdump xwd +text/html-sandboxed sandboxed +text/x-pod pod +text/x-setext etx +video/webm webm +video/x-annodex axv +video/x-flv flv +video/x-javafx fxm +video/x-matroska mkv +video/x-matroska-3d mk3d +video/x-ms-asf asx +video/x-ms-wm wm +video/x-ms-wmv wmv +video/x-ms-wmx wmx +video/x-ms-wvx wvx +video/x-msvideo avi +video/x-sgi-movie movie +x-conference/x-cooltalk ice +x-epoc/x-sisx-app sisx diff --git a/images/nginx/Makefile b/images/nginx/Makefile index f2ee09a4b..7593b2572 100644 --- a/images/nginx/Makefile +++ b/images/nginx/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
# 0.0.0 shouldn't clobber any released builds -TAG ?= 0.50 +TAG ?= 0.52 REGISTRY ?= quay.io/kubernetes-ingress-controller ARCH ?= $(shell go env GOARCH) DOCKER ?= docker diff --git a/images/nginx/README.md b/images/nginx/README.md index 01f4cc7fa..dc56fa80d 100644 --- a/images/nginx/README.md +++ b/images/nginx/README.md @@ -6,7 +6,6 @@ nginx [engine x] is an HTTP and reverse proxy server, a mail proxy server, and a This custom nginx image contains: - [stream](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) tcp support for upstreams -- nginx stats [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) - [Dynamic TLS record sizing](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/) - [ngx_devel_kit](https://github.com/simpl/ngx_devel_kit) - [set-misc-nginx-module](https://github.com/openresty/set-misc-nginx-module) diff --git a/images/nginx/build.sh b/images/nginx/build.sh index 9c82f627d..62625ad0c 100755 --- a/images/nginx/build.sh +++ b/images/nginx/build.sh @@ -20,8 +20,7 @@ set -o nounset set -o pipefail export NGINX_VERSION=1.13.12 -export NDK_VERSION=0.3.0 -export VTS_VERSION=0.1.16 +export NDK_VERSION=0.3.1rc1 export SETMISC_VERSION=0.31 export STICKY_SESSIONS_VERSION=08a395c66e42 export MORE_HEADERS_VERSION=0.33 @@ -35,7 +34,8 @@ export MODSECURITY_VERSION=1.0.0 export LUA_NGX_VERSION=0.10.13 export LUA_UPSTREAM_VERSION=0.07 export COOKIE_FLAG_VERSION=1.1.0 -export NGINX_INFLUXDB_VERSION=f8732268d44aea706ecf8d9c6036e9b6dacc99b2 +export NGINX_INFLUXDB_VERSION=f20cfb2458c338f162132f5a21eb021e2cbe6383 +export GEOIP2_VERSION=2.0 export BUILD_PATH=/tmp/build @@ -88,21 +88,22 @@ clean-install \ lua-cjson \ python \ luarocks \ + libmaxminddb-dev \ || exit 1 if [[ ${ARCH} == "x86_64" ]]; then ln -s /usr/lib/x86_64-linux-gnu/liblua5.1.so /usr/lib/liblua.so - ln -s /usr/lib/x86_64-linux-gnu /usr/lib/lua-platform-path + ln -s /usr/lib/x86_64-linux-gnu /usr/lib/lua-platform-path fi if [[ ${ARCH} == "armv7l" ]]; then ln -s /usr/lib/arm-linux-gnueabihf/liblua5.1.so /usr/lib/liblua.so - ln -s /usr/lib/arm-linux-gnueabihf /usr/lib/lua-platform-path + ln -s /usr/lib/arm-linux-gnueabihf /usr/lib/lua-platform-path fi if [[ ${ARCH} == "aarch64" ]]; then ln -s /usr/lib/aarch64-linux-gnu/liblua5.1.so /usr/lib/liblua.so - ln -s /usr/lib/aarch64-linux-gnu /usr/lib/lua-platform-path + ln -s /usr/lib/aarch64-linux-gnu /usr/lib/lua-platform-path fi if [[ ${ARCH} == "ppc64le" ]]; then @@ -130,6 +131,8 @@ function geoip_get { geoip_get "GeoIP.dat.gz" "https://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz" geoip_get "GeoLiteCity.dat.gz" "https://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz" geoip_get "GeoIPASNum.dat.gz" "http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz" +geoip_get "GeoLite2-City.mmdb.gz" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz" +geoip_get "GeoLite2-ASN.mmdb.gz" "http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz" mkdir --verbose -p "$BUILD_PATH" cd "$BUILD_PATH" @@ -138,15 +141,12 @@ cd "$BUILD_PATH" get_src fb92f5602cdb8d3ab1ad47dbeca151b185d62eedb67d347bbe9d79c1438c85de \ "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" -get_src 88e05a99a8a7419066f5ae75966fb1efc409bad4522d14986da074554ae61619 \ +get_src 49f50d4cd62b166bc1aaf712febec5e028d9f187cedbc27a610dfd01bdde2d36 \ "https://github.com/simpl/ngx_devel_kit/archive/v$NDK_VERSION.tar.gz" get_src 97946a68937b50ab8637e1a90a13198fe376d801dc3e7447052e43c28e9ee7de \ 
"https://github.com/openresty/set-misc-nginx-module/archive/v$SETMISC_VERSION.tar.gz" -get_src c668d0ed38afbba12f0224cb8cf5d70dcb9388723766dfb40d00539f887186fa \ - "https://github.com/vozlt/nginx-module-vts/archive/v$VTS_VERSION.tar.gz" - get_src a3dcbab117a9c103bc1ea5200fc00a7b7d2af97ff7fd525f16f8ac2632e30fbf \ "https://github.com/openresty/headers-more-nginx-module/archive/v$MORE_HEADERS_VERSION.tar.gz" @@ -213,9 +213,12 @@ get_src d81b33129c6fb5203b571fa4d8394823bf473d8872c0357a1d0f14420b1483bd \ get_src 76d8638a350a0484b3d6658e329ba38bb831d407eaa6dce2a084a27a22063133 \ "https://github.com/openresty/luajit2/archive/v2.1-20180420.tar.gz" -get_src e41589bd88953276c16c4817ab9b4faba1aca21d9bb70a8c1714505176c16ae4 \ +get_src 1897d7677d99c1cedeb95b2eb00652a4a7e8e604304c3053a93bd3ba7dd82884 \ "https://github.com/influxdata/nginx-influxdb-module/archive/$NGINX_INFLUXDB_VERSION.tar.gz" +get_src ebb4652c4f9a2e1ee31fddefc4c93ff78e651a4b2727d3453d026bccbd708d99 \ + "https://github.com/leev/ngx_http_geoip2_module/archive/${GEOIP2_VERSION}.tar.gz" + # improve compilation times CORES=$(($(grep -c ^processor /proc/cpuinfo) - 0)) @@ -373,6 +376,7 @@ Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-999-EXCLUSION-RULES-AFTE cd "$BUILD_PATH/nginx-$NGINX_VERSION" WITH_FLAGS="--with-debug \ + --with-compat \ --with-pcre-jit \ --with-http_ssl_module \ --with-http_stub_status_module \ @@ -405,7 +409,6 @@ fi WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \ --add-module=$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION \ - --add-module=$BUILD_PATH/nginx-module-vts-$VTS_VERSION \ --add-module=$BUILD_PATH/headers-more-nginx-module-$MORE_HEADERS_VERSION \ --add-module=$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION \ --add-module=$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH \ @@ -418,6 +421,7 @@ WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \ --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/jaeger \ --add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/zipkin \ --add-dynamic-module=$BUILD_PATH/ModSecurity-nginx-$MODSECURITY_VERSION \ + --add-dynamic-module=$BUILD_PATH/ngx_http_geoip2_module-${GEOIP2_VERSION} \ --add-module=$BUILD_PATH/ngx_brotli" ./configure \ diff --git a/internal/file/filesystem.go b/internal/file/filesystem.go index 53d2036a3..4a66ed263 100644 --- a/internal/file/filesystem.go +++ b/internal/file/filesystem.go @@ -25,6 +25,12 @@ import ( "k8s.io/kubernetes/pkg/util/filesystem" ) +// ReadWriteByUser defines linux permission to read and write files for the owner user +const ReadWriteByUser = 0660 + +// ReadByUserGroup defines linux permission to read files by the user and group owner/s +const ReadByUserGroup = 0640 + // Filesystem is an interface that we can use to mock various filesystem operations type Filesystem interface { filesystem.Filesystem @@ -35,7 +41,7 @@ func NewLocalFS() (Filesystem, error) { fs := filesystem.DefaultFs{} for _, directory := range directories { - err := fs.MkdirAll(directory, 0655) + err := fs.MkdirAll(directory, ReadWriteByUser) if err != nil { return nil, err } @@ -97,12 +103,5 @@ func NewFakeFS() (Filesystem, error) { } } - fakeFs.MkdirAll("/run", 0655) - fakeFs.MkdirAll("/proc", 0655) - fakeFs.MkdirAll("/etc/nginx/template", 0655) - - fakeFs.MkdirAll(DefaultSSLDirectory, 0655) - fakeFs.MkdirAll(AuthDirectory, 0655) - return fakeFs, nil } diff --git a/internal/ingress/annotations/annotations.go 
b/internal/ingress/annotations/annotations.go index 66337710b..edeea8c20 100644 --- a/internal/ingress/annotations/annotations.go +++ b/internal/ingress/annotations/annotations.go @@ -54,7 +54,6 @@ import ( "k8s.io/ingress-nginx/internal/ingress/annotations/sslpassthrough" "k8s.io/ingress-nginx/internal/ingress/annotations/upstreamhashby" "k8s.io/ingress-nginx/internal/ingress/annotations/upstreamvhost" - "k8s.io/ingress-nginx/internal/ingress/annotations/vtsfilterkey" "k8s.io/ingress-nginx/internal/ingress/annotations/xforwardedprefix" "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -90,7 +89,6 @@ type Ingress struct { UpstreamHashBy string LoadBalancing string UpstreamVhost string - VtsFilterKey string Whitelist ipwhitelist.SourceRange XForwardedPrefix bool SSLCiphers string @@ -132,7 +130,6 @@ func NewAnnotationExtractor(cfg resolver.Resolver) Extractor { "UpstreamHashBy": upstreamhashby.NewParser(cfg), "LoadBalancing": loadbalancing.NewParser(cfg), "UpstreamVhost": upstreamvhost.NewParser(cfg), - "VtsFilterKey": vtsfilterkey.NewParser(cfg), "Whitelist": ipwhitelist.NewParser(cfg), "XForwardedPrefix": xforwardedprefix.NewParser(cfg), "SSLCiphers": sslcipher.NewParser(cfg), diff --git a/internal/ingress/annotations/auth/main.go b/internal/ingress/annotations/auth/main.go index 5b465f9df..3a6a3fae4 100644 --- a/internal/ingress/annotations/auth/main.go +++ b/internal/ingress/annotations/auth/main.go @@ -19,8 +19,6 @@ package auth import ( "fmt" "io/ioutil" - "os" - "path" "regexp" "github.com/pkg/errors" @@ -86,17 +84,6 @@ type auth struct { // NewParser creates a new authentication annotation parser func NewParser(authDirectory string, r resolver.Resolver) parser.IngressAnnotation { - os.MkdirAll(authDirectory, 0755) - - currPath := authDirectory - for currPath != "/" { - currPath = path.Dir(currPath) - err := os.Chmod(currPath, 0755) - if err != nil { - break - } - } - return auth{r, authDirectory} } @@ -157,8 +144,7 @@ func dumpSecret(filename string, secret *api.Secret) error { } } - // TODO: check permissions required - err := ioutil.WriteFile(filename, val, 0777) + err := ioutil.WriteFile(filename, val, file.ReadWriteByUser) if err != nil { return ing_errors.LocationDenied{ Reason: errors.Wrap(err, "unexpected error creating password file"), diff --git a/internal/ingress/annotations/vtsfilterkey/main.go b/internal/ingress/annotations/vtsfilterkey/main.go deleted file mode 100644 index e349c36a6..000000000 --- a/internal/ingress/annotations/vtsfilterkey/main.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package vtsfilterkey - -import ( - extensions "k8s.io/api/extensions/v1beta1" - - "k8s.io/ingress-nginx/internal/ingress/annotations/parser" - "k8s.io/ingress-nginx/internal/ingress/resolver" -) - -type vtsFilterKey struct { - r resolver.Resolver -} - -// NewParser creates a new vts filter key annotation parser -func NewParser(r resolver.Resolver) parser.IngressAnnotation { - return vtsFilterKey{r} -} - -// Parse parses the annotations contained in the ingress rule -// used to indicate if the location/s contains a fragment of -// configuration to be included inside the paths of the rules -func (a vtsFilterKey) Parse(ing *extensions.Ingress) (interface{}, error) { - return parser.GetStringAnnotation("vts-filter-key", ing) -} diff --git a/internal/ingress/controller/checker.go b/internal/ingress/controller/checker.go index 0d6eaca7c..92096eee6 100644 --- a/internal/ingress/controller/checker.go +++ b/internal/ingress/controller/checker.go @@ -26,6 +26,8 @@ import ( "github.com/pkg/errors" ) +const nginxPID = "/tmp/nginx.pid" + // Name returns the healthcheck name func (n NGINXController) Name() string { return "nginx-ingress-controller" @@ -58,13 +60,13 @@ func (n *NGINXController) Check(_ *http.Request) error { if err != nil { return errors.Wrap(err, "unexpected error reading /proc directory") } - f, err := n.fileSystem.ReadFile("/run/nginx.pid") + f, err := n.fileSystem.ReadFile(nginxPID) if err != nil { - return errors.Wrap(err, "unexpected error reading /run/nginx.pid") + return errors.Wrapf(err, "unexpected error reading %v", nginxPID) } pid, err := strconv.Atoi(strings.TrimRight(string(f), "\r\n")) if err != nil { - return errors.Wrap(err, "unexpected error reading the PID from /run/nginx.pid") + return errors.Wrapf(err, "unexpected error reading the nginx PID from %v", nginxPID) } _, err = fs.NewProc(pid) diff --git a/internal/ingress/controller/checker_test.go b/internal/ingress/controller/checker_test.go index cb3aca44e..4e2385cf4 100644 --- a/internal/ingress/controller/checker_test.go +++ b/internal/ingress/controller/checker_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/kubernetes/pkg/util/filesystem" + "k8s.io/ingress-nginx/internal/file" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" ) @@ -60,8 +61,8 @@ func TestNginxCheck(t *testing.T) { }) // create pid file - fs.MkdirAll("/run", 0655) - pidFile, err := fs.Create("/run/nginx.pid") + fs.MkdirAll("/tmp", file.ReadWriteByUser) + pidFile, err := fs.Create(nginxPID) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/internal/ingress/controller/config/config.go b/internal/ingress/controller/config/config.go index 436bb0612..a5f393073 100644 --- a/internal/ingress/controller/config/config.go +++ b/internal/ingress/controller/config/config.go @@ -161,31 +161,6 @@ type Configuration struct { // By default this is enabled IgnoreInvalidHeaders bool `json:"ignore-invalid-headers"` - // EnableVtsStatus allows the replacement of the default status page with a third party module named - // nginx-module-vts - https://github.com/vozlt/nginx-module-vts - // By default this is disabled - EnableVtsStatus bool `json:"enable-vts-status,omitempty"` - - // Vts config on http level - // Description: Sets parameters for a shared memory zone that will keep states for various keys. 
The cache is shared between all worker processe - // https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_zone - // Default value is 10m - VtsStatusZoneSize string `json:"vts-status-zone-size,omitempty"` - - // Vts config on http level - // Description: Enables the keys by user defined variable. The key is a key string to calculate traffic. - // The name is a group string to calculate traffic. The key and name can contain variables such as $host, - // $server_name. The name's group belongs to filterZones if specified. The key's group belongs to serverZones - // if not specified second argument name. The example with geoip module is as follows: - // https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key - // Default value is $geoip_country_code country::* - VtsDefaultFilterKey string `json:"vts-default-filter-key,omitempty"` - - // Description: Sets sum key used by vts json output, and the sum label in prometheus output. - // These indicate metrics values for all server zones combined, rather than for a specific one. - // Default value is * - VtsSumKey string `json:"vts-sum-key,omitempty"` - // RetryNonIdempotent since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) // in case of an error. The previous behavior can be restored using the value true RetryNonIdempotent bool `json:"retry-non-idempotent"` @@ -531,6 +506,9 @@ type Configuration struct { // http://github.com/influxdata/nginx-influxdb-module/ // By default this is disabled EnableInfluxDB bool `json:"enable-influxdb"` + + // Checksum contains a checksum of the configmap configuration + Checksum string `json:"-"` } // NewDefault returns the default nginx configuration @@ -603,9 +581,6 @@ func NewDefault() Configuration { WorkerProcesses: strconv.Itoa(runtime.NumCPU()), WorkerShutdownTimeout: "10s", LoadBalanceAlgorithm: defaultLoadBalancerAlgorithm, - VtsStatusZoneSize: "10m", - VtsDefaultFilterKey: "$geoip_country_code country::*", - VtsSumKey: "*", VariablesHashBucketSize: 128, VariablesHashMaxSize: 2048, UseHTTP2: true, diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index 79519e77d..92786af9a 100644 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -22,7 +22,6 @@ import ( "sort" "strconv" "strings" - "sync/atomic" "time" "github.com/golang/glog" @@ -61,15 +60,15 @@ type Configuration struct { ForceNamespaceIsolation bool - // optional + // +optional TCPConfigMapName string - // optional + // +optional UDPConfigMapName string DefaultHealthzURL string DefaultSSLCertificate string - // optional + // +optional PublishService string PublishStatusAddress string @@ -98,7 +97,7 @@ type Configuration struct { DisableLua bool } -// GetPublishService returns the configured service used to set ingress status +// GetPublishService returns the Service used to set the load-balancer status of Ingresses. func (n NGINXController) GetPublishService() *apiv1.Service { s, err := n.store.GetService(n.cfg.PublishService) if err != nil { @@ -108,9 +107,9 @@ func (n NGINXController) GetPublishService() *apiv1.Service { return s } -// sync collects all the pieces required to assemble the configuration file and -// then sends the content to the backend (OnUpdate) receiving the populated -// template as response reloading the backend if is required. 
+// syncIngress collects all the pieces required to assemble the NGINX +// configuration file and passes the resulting data structures to the backend +// (OnUpdate) when a reload is deemed necessary. func (n *NGINXController) syncIngress(interface{}) error { n.syncRateLimiter.Accept() @@ -118,7 +117,7 @@ func (n *NGINXController) syncIngress(interface{}) error { return nil } - // Sort ingress rules using the ResourceVersion field + // sort Ingresses using the ResourceVersion field ings := n.store.ListIngresses() sort.SliceStable(ings, func(i, j int) bool { ir := ings[i].ResourceVersion @@ -136,7 +135,7 @@ func (n *NGINXController) syncIngress(interface{}) error { for _, loc := range server.Locations { if loc.Path != rootLocation { - glog.Warningf("ignoring path %v of ssl passthrough host %v", loc.Path, server.Hostname) + glog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname) continue } passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{ @@ -155,27 +154,29 @@ func (n *NGINXController) syncIngress(interface{}) error { TCPEndpoints: n.getStreamServices(n.cfg.TCPConfigMapName, apiv1.ProtocolTCP), UDPEndpoints: n.getStreamServices(n.cfg.UDPConfigMapName, apiv1.ProtocolUDP), PassthroughBackends: passUpstreams, + + ConfigurationChecksum: n.store.GetBackendConfiguration().Checksum, } - if !n.isForceReload() && n.runningConfig.Equal(&pcfg) { - glog.V(3).Infof("skipping backend reload (no changes detected)") + if n.runningConfig.Equal(&pcfg) { + glog.V(3).Infof("No configuration change detected, skipping backend reload.") return nil } - if n.cfg.DynamicConfigurationEnabled && n.IsDynamicConfigurationEnough(&pcfg) && !n.isForceReload() { - glog.Infof("skipping reload") + if n.cfg.DynamicConfigurationEnabled && n.IsDynamicConfigurationEnough(&pcfg) { + glog.Infof("Changes handled by the dynamic configuration, skipping backend reload.") } else { - glog.Infof("backend reload required") + glog.Infof("Configuration changes detected, backend reload required.") err := n.OnUpdate(pcfg) if err != nil { IncReloadErrorCount() ConfigSuccess(false) - glog.Errorf("unexpected failure restarting the backend: \n%v", err) + glog.Errorf("Unexpected failure reloading the backend:\n%v", err) return err } - glog.Infof("ingress backend successfully reloaded...") + glog.Infof("Backend successfully reloaded.") ConfigSuccess(true) IncReloadCount() setSSLExpireTime(servers) @@ -185,49 +186,45 @@ func (n *NGINXController) syncIngress(interface{}) error { isFirstSync := n.runningConfig.Equal(&ingress.Configuration{}) go func(isFirstSync bool) { if isFirstSync { - glog.Infof("first sync of Nginx configuration") + glog.Infof("Initial synchronization of the NGINX configuration.") - // it takes time for Nginx to start listening on the port + // it takes time for NGINX to start listening on the configured ports time.Sleep(1 * time.Second) } err := configureDynamically(&pcfg, n.cfg.ListenPorts.Status) if err == nil { - glog.Infof("dynamic reconfiguration succeeded") + glog.Infof("Dynamic reconfiguration succeeded.") } else { - glog.Warningf("could not dynamically reconfigure: %v", err) + glog.Warningf("Dynamic reconfiguration failed: %v", err) } }(isFirstSync) } n.runningConfig = &pcfg - n.SetForceReload(false) return nil } func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Protocol) []ingress.L4Service { - glog.V(3).Infof("obtaining information about stream services of type %v located in configmap %v", proto, configmapName) + 
glog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName) if configmapName == "" { - // no configmap configured return []ingress.L4Service{} } _, _, err := k8s.ParseNameNS(configmapName) if err != nil { - glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err) + glog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err) return []ingress.L4Service{} } configmap, err := n.store.GetConfigMap(configmapName) if err != nil { - glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err) + glog.Errorf("Error reading ConfigMap %q: %v", configmapName, err) return []ingress.L4Service{} } var svcs []ingress.L4Service var svcProxyProtocol ingress.ProxyProtocol - // k -> port to expose - // v -> /: rp := []int{ n.cfg.ListenPorts.HTTP, @@ -239,21 +236,22 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr } reserverdPorts := sets.NewInt(rp...) - for k, v := range configmap.Data { - externalPort, err := strconv.Atoi(k) + // svcRef format: <(str)namespace>/<(str)service>:<(intstr)port>[:<(bool)decode>:<(bool)encode>] + for port, svcRef := range configmap.Data { + externalPort, err := strconv.Atoi(port) if err != nil { - glog.Warningf("%v is not valid as a TCP/UDP port", k) + glog.Warningf("%q is not a valid %v port number", port, proto) continue } if reserverdPorts.Has(externalPort) { - glog.Warningf("port %v cannot be used for TCP or UDP services. It is reserved for the Ingress controller", k) + glog.Warningf("Port %d cannot be used for %v stream services. It is reserved for the Ingress controller.", externalPort, proto) continue } - nsSvcPort := strings.Split(v, ":") + nsSvcPort := strings.Split(svcRef, ":") if len(nsSvcPort) < 2 { - glog.Warningf("invalid format (namespace/name:port:[PROXY]:[PROXY]) '%v'", k) + glog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort) continue } @@ -262,7 +260,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr svcProxyProtocol.Decode = false svcProxyProtocol.Encode = false - // Proxy protocol is possible if the service is TCP + // Proxy Protocol is only compatible with TCP Services if len(nsSvcPort) >= 3 && proto == apiv1.ProtocolTCP { if len(nsSvcPort) >= 3 && strings.ToUpper(nsSvcPort[2]) == "PROXY" { svcProxyProtocol.Decode = true @@ -280,14 +278,15 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr svc, err := n.store.GetService(nsName) if err != nil { - glog.Warningf("error getting service %v: %v", nsName, err) + glog.Warningf("Error getting Service %q from local store: %v", nsName, err) continue } var endps []ingress.Endpoint targetPort, err := strconv.Atoi(svcPort) if err != nil { - glog.V(3).Infof("searching service %v endpoints using the name '%v'", svcNs, svcName, svcPort) + // not a port number, fall back to using port name + glog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName) for _, sp := range svc.Spec.Ports { if sp.Name == svcPort { if sp.Protocol == proto { @@ -297,8 +296,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr } } } else { - // we need to use the TargetPort (where the endpoints are running) - glog.V(3).Infof("searching service %v/%v endpoints using the target port '%v'", svcNs, svcName, targetPort) + glog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName) for _, sp := range 
svc.Spec.Ports { if sp.Port == int32(targetPort) { if sp.Protocol == proto { @@ -309,10 +307,10 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr } } - // stream services cannot contain empty upstreams and there is no - // default backend equivalent + // stream services cannot contain empty upstreams and there is + // no default backend equivalent if len(endps) == 0 { - glog.Warningf("service %v/%v does not have any active endpoints for port %v and protocol %v", svcNs, svcName, svcPort, proto) + glog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort) continue } @@ -332,9 +330,8 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr return svcs } -// getDefaultUpstream returns an upstream associated with the -// default backend service. In case of error retrieving information -// configure the upstream to return http code 503. +// getDefaultUpstream returns the upstream associated with the default backend. +// Configures the upstream to return HTTP code 503 in case of error. func (n *NGINXController) getDefaultUpstream() *ingress.Backend { upstream := &ingress.Backend{ Name: defUpstreamName, @@ -342,14 +339,14 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend { svcKey := n.cfg.DefaultService svc, err := n.store.GetService(svcKey) if err != nil { - glog.Warningf("unexpected error searching the default backend %v: %v", n.cfg.DefaultService, err) + glog.Warningf("Unexpected error getting default backend %q from local store: %v", n.cfg.DefaultService, err) upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint()) return upstream } endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("service %v does not have any active endpoints", svcKey) + glog.Warningf("Service %q does not have any active Endpoint", svcKey) endps = []ingress.Endpoint{n.DefaultEndpoint()} } @@ -358,8 +355,9 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend { return upstream } -// getBackendServers returns a list of Upstream and Server to be used by the backend -// An upstream can be used in multiple servers if the namespace, service name and port are the same +// getBackendServers returns a list of Upstream and Server to be used by the +// backend. An upstream can be used in multiple servers if the namespace, +// service name and port are the same. 
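
For reference, a minimal sketch (not part of this patch) of the naming scheme that makes this upstream sharing possible; the upstreamName helper below is hypothetical, the controller builds the same "<namespace>-<service>-<port>" string inline with fmt.Sprintf:

    package sketch

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    // upstreamName builds the key used for NGINX upstreams. Because the key
    // depends only on the backend reference, rules in different servers that
    // point at the same Service port resolve to the same upstream entry.
    func upstreamName(namespace, service string, port intstr.IntOrString) string {
    	return fmt.Sprintf("%v-%v-%v", namespace, service, port.String())
    }

    // Example: upstreamName("default", "echo", intstr.FromString("http"))
    // yields "default-echo-http".
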
func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([]*ingress.Backend, []*ingress.Server) { du := n.getDefaultUpstream() upstreams := n.createUpstreams(ingresses, du) @@ -368,7 +366,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] for _, ing := range ingresses { anns, err := n.store.GetIngressAnnotations(ing) if err != nil { - glog.Errorf("unexpected error reading ingress annotations: %v", err) + glog.Errorf("Unexpected error reading annotations for Ingress %q from local store: %v", ing.Name, err) } for _, rule := range ing.Spec.Rules { @@ -383,7 +381,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] if rule.HTTP == nil && host != defServerName { - glog.V(3).Infof("ingress rule %v/%v does not contain HTTP rules, using default backend", ing.Namespace, ing.Name) + glog.V(3).Infof("Ingress \"%v/%v\" does not contain any HTTP rule, using default backend.", ing.Namespace, ing.Name) continue } @@ -393,23 +391,21 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] if server.CertificateAuth.CAFileName == "" { server.CertificateAuth = anns.CertificateAuth - // It is possible that no CAFileName is found in the secret - if server.CertificateAuth.CAFileName == "" { - glog.V(3).Infof("secret %v does not contain 'ca.crt', mutual authentication not enabled - ingress rule %v/%v.", server.CertificateAuth.Secret, ing.Namespace, ing.Name) + if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" { + glog.V(3).Infof("Secret %q does not contain 'ca.crt' key, mutual authentication disabled for Ingress \"%v/%v\"", server.CertificateAuth.Secret, ing.Namespace, ing.Name) } } else { - glog.V(3).Infof("server %v already contains a mutual authentication configuration - ingress rule %v/%v", server.Hostname, ing.Namespace, ing.Name) + glog.V(3).Infof("Server %v is already configured for mutual authentication (Ingress \"%v/%v\")", server.Hostname, ing.Namespace, ing.Name) } for _, path := range rule.HTTP.Paths { upsName := fmt.Sprintf("%v-%v-%v", - ing.GetNamespace(), + ing.Namespace, path.Backend.ServiceName, path.Backend.ServicePort.String()) ups := upstreams[upsName] - // if there's no path defined we assume / nginxPath := rootLocation if path.Path != "" { nginxPath = path.Path @@ -421,11 +417,11 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] addLoc = false if !loc.IsDefBackend { - glog.V(3).Infof("avoiding replacement of ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend) + glog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress \"%v/%v\")", loc.Path, server.Hostname, loc.Backend, ing.Namespace, ing.Name) break } - glog.V(3).Infof("replacing ingress rule %v/%v location %v upstream %v (%v)", ing.Namespace, ing.Name, loc.Path, ups.Name, loc.Backend) + glog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress \"%v/%v\")", loc.Path, server.Hostname, loc.Backend, ups.Name, ing.Namespace, ing.Name) loc.Backend = ups.Name loc.IsDefBackend = false loc.Port = ups.Port @@ -441,7 +437,6 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] loc.Redirect = anns.Redirect loc.Rewrite = anns.Rewrite loc.UpstreamVhost = anns.UpstreamVhost - loc.VtsFilterKey = anns.VtsFilterKey loc.Whitelist = anns.Whitelist loc.Denied = anns.Denied loc.XForwardedPrefix = anns.XForwardedPrefix @@ -459,9 
+454,10 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] break } } - // is a new location + + // new location if addLoc { - glog.V(3).Infof("adding location %v in ingress rule %v/%v upstream %v", nginxPath, ing.Namespace, ing.Name, ups.Name) + glog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress \"%v/%v\")", nginxPath, server.Hostname, ups.Name, ing.Namespace, ing.Name) loc := &ingress.Location{ Path: nginxPath, Backend: ups.Name, @@ -479,7 +475,6 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] Redirect: anns.Redirect, Rewrite: anns.Rewrite, UpstreamVhost: anns.UpstreamVhost, - VtsFilterKey: anns.VtsFilterKey, Whitelist: anns.Whitelist, Denied: anns.Denied, XForwardedPrefix: anns.XForwardedPrefix, @@ -525,15 +520,16 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] for _, location := range server.Locations { if upstream.Name == location.Backend { if len(upstream.Endpoints) == 0 { - glog.V(3).Infof("upstream %v does not have any active endpoints.", upstream.Name) + glog.V(3).Infof("Upstream %q does not have any active endpoints.", upstream.Name) + location.Backend = "" // for nginx.tmpl checking // check if the location contains endpoints and a custom default backend if location.DefaultBackend != nil { sp := location.DefaultBackend.Spec.Ports[0] endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, &healthcheck.Config{}, n.store.GetServiceEndpoints) if len(endps) > 0 { - glog.V(3).Infof("using custom default backend in server %v location %v (service %v/%v)", - server.Hostname, location.Path, location.DefaultBackend.Namespace, location.DefaultBackend.Name) + glog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")", + location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name) nb := upstream.DeepCopy() name := fmt.Sprintf("custom-default-backend-%v", upstream.Name) nb.Name = name @@ -544,14 +540,12 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] } } - // Configure Backends[].SSLPassthrough if server.SSLPassthrough { if location.Path == rootLocation { if location.Backend == defUpstreamName { - glog.Warningf("ignoring ssl passthrough of %v as it doesn't have a default backend (root context)", server.Hostname) + glog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname) continue } - isHTTPSfrom = append(isHTTPSfrom, server) } } @@ -564,7 +558,7 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] } } - // create the list of upstreams and skip those without endpoints + // create the list of upstreams and skip those without Endpoints for _, upstream := range upstreams { if len(upstream.Endpoints) == 0 { continue @@ -591,8 +585,8 @@ func (n *NGINXController) getBackendServers(ingresses []*extensions.Ingress) ([] return aUpstreams, aServers } -// createUpstreams creates the NGINX upstreams for each service referenced in -// Ingress rules. The servers inside the upstream are endpoints. +// createUpstreams creates the NGINX upstreams (Endpoints) for each Service +// referenced in Ingress rules. 
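
A rough illustration (a sketch, not the controller's actual types) of the two endpoint strategies used by createUpstreams: the service-upstream annotation swaps the per-Pod endpoints for a single ClusterIP entry. The endpoint struct and resolveEndpoints name are stand-ins for ingress.Endpoint and the inline logic in this diff:

    package sketch

    // endpoint is a simplified stand-in for ingress.Endpoint.
    type endpoint struct {
    	Address string
    	Port    string
    }

    // resolveEndpoints mirrors the branch in createUpstreams: with the
    // service-upstream annotation the upstream gets a single entry pointing
    // at the Service ClusterIP, otherwise one entry per ready Pod endpoint.
    func resolveEndpoints(serviceUpstream bool, clusterIP endpoint, podEndpoints []endpoint) []endpoint {
    	if serviceUpstream {
    		return []endpoint{clusterIP}
    	}
    	return podEndpoints
    }
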
func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingress.Backend) map[string]*ingress.Backend { upstreams := make(map[string]*ingress.Backend) upstreams[defUpstreamName] = du @@ -600,17 +594,17 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres for _, ing := range data { anns, err := n.store.GetIngressAnnotations(ing) if err != nil { - glog.Errorf("unexpected error reading ingress annotations: %v", err) + glog.Errorf("Error reading Ingress annotations: %v", err) } var defBackend string if ing.Spec.Backend != nil { defBackend = fmt.Sprintf("%v-%v-%v", - ing.GetNamespace(), + ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String()) - glog.V(3).Infof("creating upstream %v", defBackend) + glog.V(3).Infof("Creating upstream %q", defBackend) upstreams[defBackend] = newUpstream(defBackend) if !upstreams[defBackend].Secure { upstreams[defBackend].Secure = anns.SecureUpstream.Secure @@ -625,14 +619,13 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres upstreams[defBackend].LoadBalancing = anns.LoadBalancing } - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName) + svcKey := fmt.Sprintf("%v/%v", ing.Namespace, ing.Spec.Backend.ServiceName) - // Add the service cluster endpoint as the upstream instead of individual endpoints - // if the serviceUpstream annotation is enabled + // add the service ClusterIP as a single Endpoint instead of individual Endpoints if anns.ServiceUpstream { endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend) if err != nil { - glog.Errorf("Failed to get service cluster endpoint for service %s: %v", svcKey, err) + glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) } else { upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint} } @@ -642,7 +635,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String(), &anns.HealthCheck) upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...) 
if err != nil { - glog.Warningf("error creating upstream %v: %v", defBackend, err) + glog.Warningf("Error creating upstream %q: %v", defBackend, err) } } @@ -655,7 +648,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres for _, path := range rule.HTTP.Paths { name := fmt.Sprintf("%v-%v-%v", - ing.GetNamespace(), + ing.Namespace, path.Backend.ServiceName, path.Backend.ServicePort.String()) @@ -663,7 +656,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres continue } - glog.V(3).Infof("creating upstream %v", name) + glog.V(3).Infof("Creating upstream %q", name) upstreams[name] = newUpstream(name) upstreams[name].Port = path.Backend.ServicePort @@ -683,14 +676,13 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres upstreams[name].LoadBalancing = anns.LoadBalancing } - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName) + svcKey := fmt.Sprintf("%v/%v", ing.Namespace, path.Backend.ServiceName) - // Add the service cluster endpoint as the upstream instead of individual endpoints - // if the serviceUpstream annotation is enabled + // add the service ClusterIP as a single Endpoint instead of individual Endpoints if anns.ServiceUpstream { endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend) if err != nil { - glog.Errorf("failed to get service cluster endpoint for service %s: %v", svcKey, err) + glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) } else { upstreams[name].Endpoints = []ingress.Endpoint{endpoint} } @@ -699,7 +691,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres if len(upstreams[name].Endpoints) == 0 { endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), &anns.HealthCheck) if err != nil { - glog.Warningf("error obtaining service endpoints: %v", err) + glog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err) continue } upstreams[name].Endpoints = endp @@ -707,7 +699,7 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres s, err := n.store.GetService(svcKey) if err != nil { - glog.Warningf("error obtaining service: %v", err) + glog.Warningf("Error obtaining Service %q: %v", svcKey, err) continue } @@ -719,20 +711,22 @@ func (n *NGINXController) createUpstreams(data []*extensions.Ingress, du *ingres return upstreams } +// getServiceClusterEndpoint returns an Endpoint corresponding to the ClusterIP +// field of a Service. 
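
A compact sketch of the named-port resolution performed by getServiceClusterEndpoint, using the real core/v1 and intstr types; resolveServicePort is a hypothetical helper name, the controller does this lookup inline:

    package sketch

    import (
    	"fmt"

    	apiv1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    // resolveServicePort returns the numeric port for an Ingress backend
    // reference: numeric references are used as-is, named references are
    // looked up in the Service spec.
    func resolveServicePort(svc *apiv1.Service, ref intstr.IntOrString) (int32, error) {
    	if ref.Type == intstr.Int {
    		return int32(ref.IntValue()), nil
    	}
    	for _, p := range svc.Spec.Ports {
    		if p.Name == ref.StrVal {
    			return p.Port, nil
    		}
    	}
    	return 0, fmt.Errorf("service %q does not have a port named %q", svc.Name, ref.String())
    }
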
func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *extensions.IngressBackend) (endpoint ingress.Endpoint, err error) { svc, err := n.store.GetService(svcKey) if err != nil { - return endpoint, fmt.Errorf("service %v does not exist", svcKey) + return endpoint, fmt.Errorf("service %q does not exist", svcKey) } if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { - return endpoint, fmt.Errorf("No ClusterIP found for service %s", svcKey) + return endpoint, fmt.Errorf("no ClusterIP found for Service %q", svcKey) } endpoint.Address = svc.Spec.ClusterIP - // If the service port in the ingress uses a name, lookup - // the actual port in the service spec + // if the Service port is referenced by name in the Ingress, lookup the + // actual port in the service spec if backend.ServicePort.Type == intstr.String { var port int32 = -1 for _, svcPort := range svc.Spec.Ports { @@ -742,7 +736,7 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte } } if port == -1 { - return endpoint, fmt.Errorf("no port mapped for service %s and port name %s", svc.Name, backend.ServicePort.String()) + return endpoint, fmt.Errorf("service %q does not have a port named %q", svc.Name, backend.ServicePort) } endpoint.Port = fmt.Sprintf("%d", port) } else { @@ -752,27 +746,27 @@ func (n *NGINXController) getServiceClusterEndpoint(svcKey string, backend *exte return endpoint, err } -// serviceEndpoints returns the upstream servers (endpoints) associated -// to a service. +// serviceEndpoints returns the upstream servers (Endpoints) associated with a +// Service. func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, hz *healthcheck.Config) ([]ingress.Endpoint, error) { svc, err := n.store.GetService(svcKey) var upstreams []ingress.Endpoint if err != nil { - return upstreams, fmt.Errorf("error getting service %v from the cache: %v", svcKey, err) + return upstreams, fmt.Errorf("error getting Service %q from local store: %v", svcKey, err) } - glog.V(3).Infof("obtaining port information for service %v", svcKey) + glog.V(3).Infof("Obtaining ports information for Service %q", svcKey) for _, servicePort := range svc.Spec.Ports { - // targetPort could be a string, use the name or the port (int) + // targetPort could be a string, use either the port name or number (int) if strconv.Itoa(int(servicePort.Port)) == backendPort || servicePort.TargetPort.String() == backendPort || servicePort.Name == backendPort { endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("service %v does not have any active endpoints", svcKey) + glog.Warningf("Service %q does not have any active Endpoint.", svcKey) } if n.cfg.SortBackends { @@ -791,11 +785,11 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, } } - // Ingress with an ExternalName service and no port defined in the service. 
+ // Ingress with an ExternalName Service and no port defined for that Service if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName { externalPort, err := strconv.Atoi(backendPort) if err != nil { - glog.Warningf("only numeric ports are allowed in ExternalName services: %v is not valid as a TCP/UDP port", backendPort) + glog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort) return upstreams, nil } @@ -806,7 +800,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, } endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, hz, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("service %v does not have any active endpoints", svcKey) + glog.Warningf("Service %q does not have any active Endpoint.", svcKey) return upstreams, nil } @@ -825,17 +819,14 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string, return upstreams, nil } -// createServers initializes a map that contains information about the list of -// FDQN referenced by ingress rules and the common name field in the referenced -// SSL certificates. Each server is configured with location / using a default -// backend specified by the user or the one inside the ingress spec. +// createServers builds a map of host name to Server structs from a map of +// already computed Upstream structs. Each Server is configured with at least +// one root location, which uses a default backend if left unspecified. func (n *NGINXController) createServers(data []*extensions.Ingress, upstreams map[string]*ingress.Backend, du *ingress.Backend) map[string]*ingress.Server { servers := make(map[string]*ingress.Server, len(data)) - // If a server has a hostname equivalent to a pre-existing alias, then we - // remove the alias to avoid conflicts. aliases := make(map[string]string, len(data)) bdef := n.store.GetDefaultBackend() @@ -858,15 +849,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, defaultPemFileName := n.cfg.FakeCertificatePath defaultPemSHA := n.cfg.FakeCertificateSHA - // Tries to fetch the default Certificate from nginx configuration. 
- // If it does not exists, use the ones generated on Start() + // read custom default SSL certificate, fall back to generated default certificate defaultCertificate, err := n.store.GetLocalSSLCert(n.cfg.DefaultSSLCertificate) if err == nil { defaultPemFileName = defaultCertificate.PemFileName defaultPemSHA = defaultCertificate.PemSHA } - // initialize the default server + // initialize default server and root location servers[defServerName] = &ingress.Server{ Hostname: defServerName, SSLCert: ingress.SSLCert{ @@ -883,33 +873,34 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, }, }} - // initialize all the servers + // initialize all other servers for _, ing := range data { anns, err := n.store.GetIngressAnnotations(ing) if err != nil { - glog.Errorf("unexpected error reading ingress annotations: %v", err) + glog.Errorf("Error reading Ingress %q annotations from local store: %v", ing.Name, err) } - // default upstream server + // default upstream name un := du.Name if ing.Spec.Backend != nil { - // replace default backend - defUpstream := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String()) + defUpstream := fmt.Sprintf("%v-%v-%v", ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String()) + if backendUpstream, ok := upstreams[defUpstream]; ok { + // use backend specified in Ingress as the default backend for all its rules un = backendUpstream.Name - // Special case: - // ingress only with a backend and no rules - // this case defines a "catch all" server + // special "catch all" case, Ingress with a backend but no rule defLoc := servers[defServerName].Locations[0] if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 { + glog.Infof("Ingress \"%v/%v\" defines a backend but no rule. Using it to configure the catch-all server %q", ing.Namespace, ing.Name, defServerName) + defLoc.IsDefBackend = false defLoc.Backend = backendUpstream.Name defLoc.Service = backendUpstream.Service defLoc.Ingress = ing - // we need to use the ingress annotations + // customize using Ingress annotations defLoc.Logs = anns.Logs defLoc.BasicDigestAuth = anns.BasicDigestAuth defLoc.ClientBodyBufferSize = anns.ClientBodyBufferSize @@ -918,16 +909,17 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, defLoc.ExternalAuth = anns.ExternalAuth defLoc.Proxy = anns.Proxy defLoc.RateLimit = anns.RateLimit - // TODO: Redirect and rewrite can affect the catch all behavior. Don't use this annotations for now + // TODO: Redirect and rewrite can affect the catch all behavior, skip for now // defLoc.Redirect = anns.Redirect // defLoc.Rewrite = anns.Rewrite defLoc.UpstreamVhost = anns.UpstreamVhost - defLoc.VtsFilterKey = anns.VtsFilterKey defLoc.Whitelist = anns.Whitelist defLoc.Denied = anns.Denied defLoc.GRPC = anns.GRPC defLoc.LuaRestyWAF = anns.LuaRestyWAF defLoc.InfluxDB = anns.InfluxDB + } else { + glog.V(3).Infof("Ingress \"%v/%v\" defines both a backend and rules. 
Using its backend as default upstream for all its rules.", ing.Namespace, ing.Name) } } } @@ -963,7 +955,7 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, for _, ing := range data { anns, err := n.store.GetIngressAnnotations(ing) if err != nil { - glog.Errorf("unexpected error reading ingress annotations: %v", err) + glog.Errorf("Error reading Ingress %q annotations from local store: %v", ing.Name, err) } for _, rule := range ing.Spec.Rules { @@ -972,7 +964,6 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, host = defServerName } - // setup server aliases if anns.Alias != "" { if servers[host].Alias == "" { servers[host].Alias = anns.Alias @@ -980,23 +971,21 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, aliases["Alias"] = host } } else { - glog.Warningf("ingress %v/%v for host %v contains an Alias but one has already been configured.", - ing.Namespace, ing.Name, host) + glog.Warningf("Aliases already configured for server %q, skipping (Ingress \"%v/%v\")", + host, ing.Namespace, ing.Name) } } - //notifying the user that it has already been configured. - if servers[host].ServerSnippet != "" && anns.ServerSnippet != "" { - glog.Warningf("ingress %v/%v for host %v contains a Server Snippet section that it has already been configured.", - ing.Namespace, ing.Name, host) + if anns.ServerSnippet != "" { + if servers[host].ServerSnippet == "" { + servers[host].ServerSnippet = anns.ServerSnippet + } else { + glog.Warningf("Server snippet already configured for server %q, skipping (Ingress \"%v/%v\")", + host, ing.Namespace, ing.Name) + } } - // only add a server snippet if the server does not have one previously configured - if servers[host].ServerSnippet == "" && anns.ServerSnippet != "" { - servers[host].ServerSnippet = anns.ServerSnippet - } - - // only add ssl ciphers if the server does not have one previously configured + // only add SSL ciphers if the server does not have them previously configured if servers[host].SSLCiphers == "" && anns.SSLCiphers != "" { servers[host].SSLCiphers = anns.SSLCiphers } @@ -1007,14 +996,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, } if len(ing.Spec.TLS) == 0 { - glog.V(3).Infof("ingress %v/%v for host %v does not contains a TLS section", ing.Namespace, ing.Name, host) + glog.V(3).Infof("Ingress \"%v/%v\" does not contain a TLS section.", ing.Namespace, ing.Name) continue } tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert) if tlsSecretName == "" { - glog.V(3).Infof("host %v is listed on tls section but secretName is empty. Using default cert", host) + glog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host) servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemSHA = defaultPemSHA continue @@ -1023,19 +1012,19 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, key := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName) cert, err := n.store.GetLocalSSLCert(key) if err != nil { - glog.Warningf("ssl certificate \"%v\" does not exist in local store", key) + glog.Warningf("SSL certificate %q does not exist in local store.", key) continue } err = cert.Certificate.VerifyHostname(host) if err != nil { - glog.Warningf("unexpected error validating SSL certificate %v for host %v.
Reason: %v", key, host, err) + glog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", key, host, err) glog.Warningf("Validating certificate against DNS names. This will be deprecated in a future version.") - // check the common name field + // check the Common Name field // https://github.com/golang/go/issues/22922 err := verifyHostname(host, cert.Certificate) if err != nil { - glog.Warningf("ssl certificate %v does not contain a Common Name or Subject Alternative Name for host %v. Reason: %v", key, host, err) + glog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", key, host, err) continue } } @@ -1043,14 +1032,14 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, servers[host].SSLCert = *cert if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) { - glog.Warningf("ssl certificate for host %v is about to expire in 10 days", host) + glog.Warningf("SSL certificate for server %q is about to expire (%v)", cert.ExpireTime) } } } for alias, host := range aliases { if _, ok := servers[alias]; ok { - glog.Warningf("There is a conflict with server hostname '%v' and alias '%v' (in server %v). Removing alias to avoid conflicts.", alias, host) + glog.Warningf("Conflicting hostname (%v) and alias (%v) in server %q. Removing alias to avoid conflicts.", alias, host) servers[host].Alias = "" } } @@ -1058,43 +1047,28 @@ func (n *NGINXController) createServers(data []*extensions.Ingress, return servers } -func (n *NGINXController) isForceReload() bool { - return atomic.LoadInt32(&n.forceReload) != 0 -} - -// SetForceReload sets if the ingress controller should be reloaded or not -func (n *NGINXController) SetForceReload(shouldReload bool) { - if shouldReload { - atomic.StoreInt32(&n.forceReload, 1) - n.syncQueue.Enqueue(&extensions.Ingress{}) - } else { - atomic.StoreInt32(&n.forceReload, 0) - } -} - -// extractTLSSecretName returns the name of the secret that -// contains a SSL certificate for a particular hostname. -// In case there is no match, an empty string is returned. +// extractTLSSecretName returns the name of the Secret containing a SSL +// certificate for the given host name, or an empty string. func extractTLSSecretName(host string, ing *extensions.Ingress, getLocalSSLCert func(string) (*ingress.SSLCert, error)) string { + if ing == nil { return "" } + // naively return Secret name from TLS spec if host name matches for _, tls := range ing.Spec.TLS { if sets.NewString(tls.Hosts...).Has(host) { return tls.SecretName } } - // contains a TLS section but none of the host match or there - // is no hosts in the TLS section. As last resort we valide - // the host against the certificate and we use it if is valid + // no TLS host matching host name, try each TLS host for matching CN for _, tls := range ing.Spec.TLS { key := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName) cert, err := getLocalSSLCert(key) if err != nil { - glog.Warningf("ssl certificate \"%v\" does not exist in local store", key) + glog.Warningf("SSL certificate %q does not exist in local store.", key) continue } diff --git a/internal/ingress/controller/endpoints.go b/internal/ingress/controller/endpoints.go index 2f5c9c76e..69b058fe2 100644 --- a/internal/ingress/controller/endpoints.go +++ b/internal/ingress/controller/endpoints.go @@ -29,14 +29,9 @@ import ( "k8s.io/ingress-nginx/internal/ingress/annotations/healthcheck" ) -// getEndpoints returns a list of : for a given service/target port combination. 
-func getEndpoints( - s *corev1.Service, - port *corev1.ServicePort, - proto corev1.Protocol, - hz *healthcheck.Config, - getServiceEndpoints func(*corev1.Service) (*corev1.Endpoints, error), -) []ingress.Endpoint { +// getEndpoints returns a list of Endpoint structs for a given service/target port combination. +func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Protocol, hz *healthcheck.Config, + getServiceEndpoints func(*corev1.Service) (*corev1.Endpoints, error)) []ingress.Endpoint { upsServers := []ingress.Endpoint{} @@ -44,26 +39,24 @@ func getEndpoints( return upsServers } - // avoid duplicated upstream servers when the service - // contains multiple port definitions sharing the same - // targetport. - adus := make(map[string]bool) + // using a map avoids duplicated upstream servers when the service + // contains multiple port definitions sharing the same targetport + processedUpstreamServers := make(map[string]struct{}) // ExternalName services if s.Spec.Type == corev1.ServiceTypeExternalName { - glog.V(3).Infof("Ingress using a service %v of type=ExternalName : %v", s.Name) + glog.V(3).Infof("Ingress using Service %q of type ExternalName.", s.Name) targetPort := port.TargetPort.IntValue() - // check for invalid port value if targetPort <= 0 { - glog.Errorf("ExternalName service with an invalid port: %v", targetPort) + glog.Errorf("ExternalName Service %q has an invalid port (%v)", s.Name, targetPort) return upsServers } if net.ParseIP(s.Spec.ExternalName) == nil { _, err := net.LookupHost(s.Spec.ExternalName) if err != nil { - glog.Errorf("unexpected error resolving host %v: %v", s.Spec.ExternalName, err) + glog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, err) return upsServers } } @@ -76,10 +69,10 @@ func getEndpoints( }) } - glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, port.String()) + glog.V(3).Infof("Getting Endpoints for Service \"%v/%v\" and port %v", s.Namespace, s.Name, port.String()) ep, err := getServiceEndpoints(s) if err != nil { - glog.Warningf("unexpected error obtaining service endpoints: %v", err) + glog.Warningf("Error obtaining Endpoints for Service \"%v/%v\": %v", s.Namespace, s.Name, err) return upsServers } @@ -99,14 +92,13 @@ func getEndpoints( targetPort = epPort.Port } - // check for invalid port value if targetPort <= 0 { continue } for _, epAddress := range ss.Addresses { ep := fmt.Sprintf("%v:%v", epAddress.IP, targetPort) - if _, exists := adus[ep]; exists { + if _, exists := processedUpstreamServers[ep]; exists { continue } ups := ingress.Endpoint{ @@ -117,11 +109,11 @@ func getEndpoints( Target: epAddress.TargetRef, } upsServers = append(upsServers, ups) - adus[ep] = true + processedUpstreamServers[ep] = struct{}{} } } } - glog.V(3).Infof("endpoints found: %v", upsServers) + glog.V(3).Infof("Endpoints found for Service \"%v/%v\": %v", s.Namespace, s.Name, upsServers) return upsServers } diff --git a/internal/ingress/controller/metric/collector/scrape.go b/internal/ingress/controller/metric/collector/scrape.go deleted file mode 100644 index a078b2859..000000000 --- a/internal/ingress/controller/metric/collector/scrape.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
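The rewritten getEndpoints above deduplicates upstream servers with a map[string]struct{} keyed by "ip:port", so a Service that declares several ports sharing the same targetPort contributes each address only once. A small self-contained sketch of that set idiom (the endpoint strings are invented for illustration):

package main

import "fmt"

// dedupeEndpoints keeps the first occurrence of every "ip:port" string, the same
// set idiom used by processedUpstreamServers in getEndpoints.
func dedupeEndpoints(eps []string) []string {
	seen := make(map[string]struct{})
	var out []string
	for _, ep := range eps {
		if _, exists := seen[ep]; exists {
			continue
		}
		seen[ep] = struct{}{}
		out = append(out, ep)
	}
	return out
}

func main() {
	// two Service ports sharing the same targetPort would yield the same address twice
	fmt.Println(dedupeEndpoints([]string{"10.0.0.1:8080", "10.0.0.1:8080", "10.0.0.2:8080"}))
	// prints: [10.0.0.1:8080 10.0.0.2:8080]
}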
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collector - -import "github.com/prometheus/client_golang/prometheus" - -// Stopable defines a prometheus collector that can be stopped -type Stopable interface { - prometheus.Collector - Stop() -} - -type scrapeRequest struct { - results chan<- prometheus.Metric - done chan struct{} -} diff --git a/internal/ingress/controller/metric/collector/status.go b/internal/ingress/controller/metric/collector/status.go deleted file mode 100644 index e195b045d..000000000 --- a/internal/ingress/controller/metric/collector/status.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collector - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strconv" - - "github.com/golang/glog" -) - -var ( - ac = regexp.MustCompile(`Active connections: (\d+)`) - sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`) - reading = regexp.MustCompile(`Reading: (\d+)`) - writing = regexp.MustCompile(`Writing: (\d+)`) - waiting = regexp.MustCompile(`Waiting: (\d+)`) -) - -type basicStatus struct { - // Active total number of active connections - Active int - // Accepted total number of accepted client connections - Accepted int - // Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit). - Handled int - // Requests total number of client requests. - Requests int - // Reading current number of connections where nginx is reading the request header. - Reading int - // Writing current number of connections where nginx is writing the response back to the client. - Writing int - // Waiting current number of idle client connections waiting for a request. 
- Waiting int -} - -// https://github.com/vozlt/nginx-module-vts -type vts struct { - NginxVersion string `json:"nginxVersion"` - LoadMsec int `json:"loadMsec"` - NowMsec int `json:"nowMsec"` - // Total connections and requests(same as stub_status_module in NGINX) - Connections connections `json:"connections"` - // Traffic(in/out) and request and response counts and cache hit ratio per each server zone - ServerZones map[string]serverZone `json:"serverZones"` - // Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through - // the vhost_traffic_status_filter_by_set_key directive - FilterZones map[string]map[string]filterZone `json:"filterZones"` - // Traffic(in/out) and request and response counts per server in each upstream group - UpstreamZones map[string][]upstreamZone `json:"upstreamZones"` -} - -type serverZone struct { - RequestCounter float64 `json:"requestCounter"` - InBytes float64 `json:"inBytes"` - OutBytes float64 `json:"outBytes"` - Responses response `json:"responses"` - Cache cache `json:"cache"` -} - -type filterZone struct { - RequestCounter float64 `json:"requestCounter"` - InBytes float64 `json:"inBytes"` - OutBytes float64 `json:"outBytes"` - Cache cache `json:"cache"` - Responses response `json:"responses"` -} - -type upstreamZone struct { - Responses response `json:"responses"` - Server string `json:"server"` - RequestCounter float64 `json:"requestCounter"` - InBytes float64 `json:"inBytes"` - OutBytes float64 `json:"outBytes"` - ResponseMsec float64 `json:"responseMsec"` - Weight float64 `json:"weight"` - MaxFails float64 `json:"maxFails"` - FailTimeout float64 `json:"failTimeout"` - Backup BoolToFloat64 `json:"backup"` - Down BoolToFloat64 `json:"down"` -} - -type cache struct { - Miss float64 `json:"miss"` - Bypass float64 `json:"bypass"` - Expired float64 `json:"expired"` - Stale float64 `json:"stale"` - Updating float64 `json:"updating"` - Revalidated float64 `json:"revalidated"` - Hit float64 `json:"hit"` - Scarce float64 `json:"scarce"` -} - -type response struct { - OneXx float64 `json:"1xx"` - TwoXx float64 `json:"2xx"` - TheeXx float64 `json:"3xx"` - FourXx float64 `json:"4xx"` - FiveXx float64 `json:"5xx"` -} - -type connections struct { - Active float64 `json:"active"` - Reading float64 `json:"reading"` - Writing float64 `json:"writing"` - Waiting float64 `json:"waiting"` - Accepted float64 `json:"accepted"` - Handled float64 `json:"handled"` - Requests float64 `json:"requests"` -} - -// BoolToFloat64 ... -type BoolToFloat64 float64 - -// UnmarshalJSON ... 
-func (bit BoolToFloat64) UnmarshalJSON(data []byte) error { - asString := string(data) - if asString == "1" || asString == "true" { - bit = 1 - } else if asString == "0" || asString == "false" { - bit = 0 - } else { - return fmt.Errorf(fmt.Sprintf("boolean unmarshal error: invalid input %s", asString)) - } - return nil -} - -func getNginxStatus(port int, path string) (*basicStatus, error) { - url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) - glog.V(3).Infof("start scraping url: %v", url) - - data, err := httpBody(url) - - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err) - } - - return parse(string(data)), nil -} - -func httpBody(url string) ([]byte, error) { - resp, err := http.DefaultClient.Get(url) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx : %v", err) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err) - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode) - } - - return data, nil -} - -func getNginxVtsMetrics(port int, path string) (*vts, error) { - url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) - glog.V(3).Infof("start scraping url: %v", url) - - data, err := httpBody(url) - - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err) - } - - var vts *vts - err = json.Unmarshal(data, &vts) - if err != nil { - return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err) - } - glog.V(3).Infof("scrape returned : %v", vts) - return vts, nil -} - -func parse(data string) *basicStatus { - acr := ac.FindStringSubmatch(data) - sahrr := sahr.FindStringSubmatch(data) - readingr := reading.FindStringSubmatch(data) - writingr := writing.FindStringSubmatch(data) - waitingr := waiting.FindStringSubmatch(data) - - return &basicStatus{ - toInt(acr, 1), - toInt(sahrr, 1), - toInt(sahrr, 2), - toInt(sahrr, 3), - toInt(readingr, 1), - toInt(writingr, 1), - toInt(waitingr, 1), - } -} - -func toInt(data []string, pos int) int { - if len(data) == 0 { - return 0 - } - if pos > len(data) { - return 0 - } - if v, err := strconv.Atoi(data[pos]); err == nil { - return v - } - return 0 -} diff --git a/internal/ingress/controller/metric/collector/status_test.go b/internal/ingress/controller/metric/collector/status_test.go deleted file mode 100644 index 5d3075dae..000000000 --- a/internal/ingress/controller/metric/collector/status_test.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collector - -import ( - "testing" - - "github.com/kylelemons/godebug/pretty" -) - -func TestParseStatus(t *testing.T) { - tests := []struct { - in string - out *basicStatus - }{ - {`Active connections: 43 -server accepts handled requests - 7368 7368 10993 -Reading: 0 Writing: 5 Waiting: 38`, - &basicStatus{43, 7368, 7368, 10993, 0, 5, 38}, - }, - {`Active connections: 0 -server accepts handled requests - 1 7 0 -Reading: A Writing: B Waiting: 38`, - &basicStatus{0, 1, 7, 0, 0, 0, 38}, - }, - } - - for _, test := range tests { - r := parse(test.in) - if diff := pretty.Compare(r, test.out); diff != "" { - t.Logf("%v", diff) - t.Fatalf("expected %v but returned %v", test.out, r) - } - } -} - -func TestToint(t *testing.T) { - tests := []struct { - in []string - pos int - exp int - }{ - {[]string{}, 0, 0}, - {[]string{}, 1, 0}, - {[]string{"A"}, 0, 0}, - {[]string{"1"}, 0, 1}, - {[]string{"a", "2"}, 1, 2}, - } - - for _, test := range tests { - v := toInt(test.in, test.pos) - if v != test.exp { - t.Fatalf("expected %v but returned %v", test.exp, v) - } - } -} diff --git a/internal/ingress/controller/metric/collector/vts.go b/internal/ingress/controller/metric/collector/vts.go deleted file mode 100644 index 33eeac492..000000000 --- a/internal/ingress/controller/metric/collector/vts.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collector - -import ( - "reflect" - - "github.com/golang/glog" - - "github.com/prometheus/client_golang/prometheus" -) - -const ns = "nginx" - -type ( - vtsCollector struct { - scrapeChan chan scrapeRequest - port int - path string - data *vtsData - watchNamespace string - ingressClass string - } - - vtsData struct { - bytes *prometheus.Desc - cache *prometheus.Desc - connections *prometheus.Desc - responses *prometheus.Desc - requests *prometheus.Desc - filterZoneBytes *prometheus.Desc - filterZoneResponses *prometheus.Desc - filterZoneCache *prometheus.Desc - upstreamBackup *prometheus.Desc - upstreamBytes *prometheus.Desc - upstreamDown *prometheus.Desc - upstreamFailTimeout *prometheus.Desc - upstreamMaxFails *prometheus.Desc - upstreamResponses *prometheus.Desc - upstreamRequests *prometheus.Desc - upstreamResponseMsec *prometheus.Desc - upstreamWeight *prometheus.Desc - } -) - -// NewNGINXVTSCollector returns a new prometheus collector for the VTS module -func NewNGINXVTSCollector(watchNamespace, ingressClass string, port int, path string) Stopable { - - p := vtsCollector{ - scrapeChan: make(chan scrapeRequest), - port: port, - path: path, - watchNamespace: watchNamespace, - ingressClass: ingressClass, - } - - p.data = &vtsData{ - bytes: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "bytes_total"), - "Nginx bytes count", - []string{"ingress_class", "namespace", "server_zone", "direction"}, nil), - - cache: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "cache_total"), - "Nginx cache count", - []string{"ingress_class", "namespace", "server_zone", "type"}, nil), - - connections: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "connections_total"), - "Nginx connections count", - []string{"ingress_class", "namespace", "type"}, nil), - - responses: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "responses_total"), - "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", - []string{"ingress_class", "namespace", "server_zone", "status_code"}, nil), - - requests: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "requests_total"), - "The total number of requested client connections.", - []string{"ingress_class", "namespace", "server_zone"}, nil), - - filterZoneBytes: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "filterzone_bytes_total"), - "Nginx bytes count", - []string{"ingress_class", "namespace", "server_zone", "key", "direction"}, nil), - - filterZoneResponses: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "filterzone_responses_total"), - "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", - []string{"ingress_class", "namespace", "server_zone", "key", "status_code"}, nil), - - filterZoneCache: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "filterzone_cache_total"), - "Nginx cache count", - []string{"ingress_class", "namespace", "server_zone", "key", "type"}, nil), - - upstreamBackup: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_backup"), - "Current backup setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamBytes: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_bytes_total"), - "The total number of bytes sent to this server.", - []string{"ingress_class", "namespace", "upstream", "server", "direction"}, nil), - - upstreamDown: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "vts_upstream_down_total"), - "Current down setting of the server.", - []string{"ingress_class", "namespace", "upstream", 
"server"}, nil), - - upstreamFailTimeout: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_fail_timeout"), - "Current fail_timeout setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamMaxFails: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_maxfails"), - "Current max_fails setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamResponses: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_responses_total"), - "The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", - []string{"ingress_class", "namespace", "upstream", "server", "status_code"}, nil), - - upstreamRequests: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_requests_total"), - "The total number of client connections forwarded to this server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamResponseMsec: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_response_msecs_avg"), - "The average of only upstream response processing times in milliseconds.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - - upstreamWeight: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "upstream_weight"), - "Current upstream weight setting of the server.", - []string{"ingress_class", "namespace", "upstream", "server"}, nil), - } - - go p.start() - - return p -} - -// Describe implements prometheus.Collector. -func (p vtsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- p.data.bytes - ch <- p.data.cache - ch <- p.data.connections - ch <- p.data.requests - ch <- p.data.responses - ch <- p.data.upstreamBackup - ch <- p.data.upstreamBytes - ch <- p.data.upstreamDown - ch <- p.data.upstreamFailTimeout - ch <- p.data.upstreamMaxFails - ch <- p.data.upstreamRequests - ch <- p.data.upstreamResponseMsec - ch <- p.data.upstreamResponses - ch <- p.data.upstreamWeight - ch <- p.data.filterZoneBytes - ch <- p.data.filterZoneCache - ch <- p.data.filterZoneResponses -} - -// Collect implements prometheus.Collector. 
-func (p vtsCollector) Collect(ch chan<- prometheus.Metric) { - req := scrapeRequest{results: ch, done: make(chan struct{})} - p.scrapeChan <- req - <-req.done -} - -func (p vtsCollector) start() { - for req := range p.scrapeChan { - ch := req.results - p.scrapeVts(ch) - req.done <- struct{}{} - } -} - -func (p vtsCollector) Stop() { - close(p.scrapeChan) -} - -// scrapeVts scrape nginx vts metrics -func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) { - nginxMetrics, err := getNginxVtsMetrics(p.port, p.path) - if err != nil { - glog.Warningf("unexpected error obtaining nginx status info: %v", err) - return - } - - reflectMetrics(&nginxMetrics.Connections, p.data.connections, ch, p.ingressClass, p.watchNamespace) - - for name, zones := range nginxMetrics.UpstreamZones { - for pos, value := range zones { - reflectMetrics(&zones[pos].Responses, p.data.upstreamResponses, ch, p.ingressClass, p.watchNamespace, name, value.Server) - - ch <- prometheus.MustNewConstMetric(p.data.upstreamRequests, - prometheus.CounterValue, zones[pos].RequestCounter, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamDown, - prometheus.CounterValue, float64(zones[pos].Down), p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamWeight, - prometheus.CounterValue, zones[pos].Weight, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamResponseMsec, - prometheus.CounterValue, zones[pos].ResponseMsec, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamBackup, - prometheus.CounterValue, float64(zones[pos].Backup), p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamFailTimeout, - prometheus.CounterValue, zones[pos].FailTimeout, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamMaxFails, - prometheus.CounterValue, zones[pos].MaxFails, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes, - prometheus.CounterValue, zones[pos].InBytes, p.ingressClass, p.watchNamespace, name, value.Server, "in") - ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes, - prometheus.CounterValue, zones[pos].OutBytes, p.ingressClass, p.watchNamespace, name, value.Server, "out") - } - } - - for name, zone := range nginxMetrics.ServerZones { - reflectMetrics(&zone.Responses, p.data.responses, ch, p.ingressClass, p.watchNamespace, name) - reflectMetrics(&zone.Cache, p.data.cache, ch, p.ingressClass, p.watchNamespace, name) - - ch <- prometheus.MustNewConstMetric(p.data.requests, - prometheus.CounterValue, zone.RequestCounter, p.ingressClass, p.watchNamespace, name) - ch <- prometheus.MustNewConstMetric(p.data.bytes, - prometheus.CounterValue, zone.InBytes, p.ingressClass, p.watchNamespace, name, "in") - ch <- prometheus.MustNewConstMetric(p.data.bytes, - prometheus.CounterValue, zone.OutBytes, p.ingressClass, p.watchNamespace, name, "out") - } - - for serverZone, keys := range nginxMetrics.FilterZones { - for name, zone := range keys { - reflectMetrics(&zone.Responses, p.data.filterZoneResponses, ch, p.ingressClass, p.watchNamespace, serverZone, name) - reflectMetrics(&zone.Cache, p.data.filterZoneCache, ch, p.ingressClass, p.watchNamespace, serverZone, name) - - ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, - prometheus.CounterValue, 
zone.InBytes, p.ingressClass, p.watchNamespace, serverZone, name, "in") - ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, - prometheus.CounterValue, zone.OutBytes, p.ingressClass, p.watchNamespace, serverZone, name, "out") - } - } -} - -func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) { - val := reflect.ValueOf(value).Elem() - - for i := 0; i < val.NumField(); i++ { - tag := val.Type().Field(i).Tag - l := append(labels, tag.Get("json")) - ch <- prometheus.MustNewConstMetric(desc, - prometheus.CounterValue, val.Field(i).Interface().(float64), - l...) - } -} diff --git a/internal/ingress/controller/nginx.go b/internal/ingress/controller/nginx.go index d300629f8..1739c1caf 100644 --- a/internal/ingress/controller/nginx.go +++ b/internal/ingress/controller/nginx.go @@ -38,7 +38,6 @@ import ( proxyproto "github.com/armon/go-proxyproto" "github.com/eapache/channels" apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" @@ -65,26 +64,14 @@ type statusModule string const ( ngxHealthPath = "/healthz" - - defaultStatusModule statusModule = "default" - vtsStatusModule statusModule = "vts" ) var ( - tmplPath = "/etc/nginx/template/nginx.tmpl" - cfgPath = "/etc/nginx/nginx.conf" - nginxBinary = "/usr/sbin/nginx" + tmplPath = "/etc/nginx/template/nginx.tmpl" ) // NewNGINXController creates a new NGINX Ingress controller. -// If the environment variable NGINX_BINARY exists it will be used -// as source for nginx commands func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController { - ngx := os.Getenv("NGINX_BINARY") - if ngx == "" { - ngx = nginxBinary - } - eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ @@ -93,12 +80,10 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl h, err := dns.GetSystemNameServers() if err != nil { - glog.Warningf("unexpected error reading system nameservers: %v", err) + glog.Warningf("Error reading system nameservers: %v", err) } n := &NGINXController{ - binary: ngx, - isIPV6Enabled: ing_net.IsIPv6Enabled(), resolver: h, @@ -116,8 +101,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl fileSystem: fs, - // create an empty configuration. 
- runningConfig: &ingress.Configuration{}, + runningConfig: new(ingress.Configuration), Proxy: &TCPProxy{}, } @@ -134,8 +118,6 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl fs, n.updateCh) - n.stats = newStatsCollector(config.Namespace, class.IngressClass, n.binary, n.cfg.ListenPorts.Status) - n.syncQueue = task.NewTaskQueue(n.syncIngress) n.annotations = annotations.NewAnnotationExtractor(n.store) @@ -153,7 +135,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl UseNodeInternalIP: config.UseNodeInternalIP, }) } else { - glog.Warning("Update of ingress status is disabled (flag --update-status=false was specified)") + glog.Warning("Update of Ingress status is disabled (flag --update-status)") } onTemplateChange := func() { @@ -162,68 +144,66 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl // this error is different from the rest because it must be clear why nginx is not working glog.Errorf(` ------------------------------------------------------------------------------- -Error loading new template : %v +Error loading new template: %v ------------------------------------------------------------------------------- `, err) return } n.t = template - glog.Info("new NGINX template loaded") - n.SetForceReload(true) + glog.Info("New NGINX configuration template loaded.") + n.syncQueue.EnqueueTask(task.GetDummyObject("template-change")) } ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs) if err != nil { - glog.Fatalf("invalid NGINX template: %v", err) + glog.Fatalf("Invalid NGINX configuration template: %v", err) } n.t = ngxTpl - // TODO: refactor if _, ok := fs.(filesystem.DefaultFs); !ok { - watch.NewDummyFileWatcher(tmplPath, onTemplateChange) - } else { + // do not setup watchers on tests + return n + } - _, err = watch.NewFileWatcher(tmplPath, onTemplateChange) + _, err = watch.NewFileWatcher(tmplPath, onTemplateChange) + if err != nil { + glog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err) + } + + filesToWatch := []string{} + err = filepath.Walk("/etc/nginx/geoip/", func(path string, info os.FileInfo, err error) error { if err != nil { - glog.Fatalf("unexpected error creating file watcher: %v", err) + return err } - filesToWatch := []string{} - err := filepath.Walk("/etc/nginx/geoip/", func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - filesToWatch = append(filesToWatch, path) + if info.IsDir() { return nil + } + + filesToWatch = append(filesToWatch, path) + return nil + }) + + if err != nil { + glog.Fatalf("Error creating file watchers: %v", err) + } + + for _, f := range filesToWatch { + _, err = watch.NewFileWatcher(f, func() { + glog.Infof("File %v changed. Reloading NGINX", f) + n.syncQueue.EnqueueTask(task.GetDummyObject("file-change")) }) - if err != nil { - glog.Fatalf("unexpected error creating file watcher: %v", err) + glog.Fatalf("Error creating file watcher for %v: %v", f, err) } - - for _, f := range filesToWatch { - _, err = watch.NewFileWatcher(f, func() { - glog.Info("file %v changed. Reloading NGINX", f) - n.SetForceReload(true) - }) - if err != nil { - glog.Fatalf("unexpected error creating file watcher: %v", err) - } - } - } return n } -// NGINXController ... +// NGINXController describes a NGINX Ingress controller.
type NGINXController struct { cfg *Configuration @@ -237,30 +217,24 @@ type NGINXController struct { syncRateLimiter flowcontrol.RateLimiter - // stopLock is used to enforce only a single call to Stop is active. - // Needed because we allow stopping through an http endpoint and + // stopLock is used to enforce that only a single call to Stop send at + // a given time. We allow stopping through an HTTP endpoint and // allowing concurrent stoppers leads to stack traces. stopLock *sync.Mutex stopCh chan struct{} updateCh *channels.RingChannel - // ngxErrCh channel used to detect errors with the nginx processes + // ngxErrCh is used to detect errors with the NGINX processes ngxErrCh chan error // runningConfig contains the running configuration in the Backend runningConfig *ingress.Configuration - forceReload int32 - t *ngx_template.Template - binary string resolver []net.IP - stats *statsCollector - statusModule statusModule - // returns true if IPV6 is enabled in the pod isIPV6Enabled bool @@ -273,9 +247,9 @@ type NGINXController struct { fileSystem filesystem.Filesystem } -// Start start a new NGINX master process running in foreground. +// Start starts a new NGINX master process running in the foreground. func (n *NGINXController) Start() { - glog.Infof("starting Ingress controller") + glog.Infof("Starting NGINX Ingress controller") n.store.Run(n.stopCh) @@ -283,9 +257,9 @@ func (n *NGINXController) Start() { go n.syncStatus.Run() } - cmd := exec.Command(n.binary, "-c", cfgPath) + cmd := nginxExecCommand() - // put nginx in another process group to prevent it + // put NGINX in another process group to prevent it // to receive signals meant for the controller cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, @@ -296,12 +270,12 @@ func (n *NGINXController) Start() { n.setupSSLProxy() } - glog.Info("starting NGINX process...") + glog.Info("Starting NGINX process") n.start(cmd) go n.syncQueue.Run(time.Second, n.stopCh) // force initial sync - n.syncQueue.Enqueue(&extensions.Ingress{}) + n.syncQueue.EnqueueTask(task.GetDummyObject("initial-sync")) for { select { @@ -320,7 +294,7 @@ func (n *NGINXController) Start() { // release command resources cmd.Process.Release() // start a new nginx master process if the controller is not being stopped - cmd = exec.Command(n.binary, "-c", cfgPath) + cmd = nginxExecCommand() cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, Pgid: 0, @@ -334,12 +308,14 @@ func (n *NGINXController) Start() { if evt, ok := event.(store.Event); ok { glog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj) if evt.Type == store.ConfigurationEvent { - n.SetForceReload(true) + // TODO: is this necessary? Consider removing this special case + n.syncQueue.EnqueueTask(task.GetDummyObject("configmap-change")) + continue } - n.syncQueue.Enqueue(evt.Obj) + n.syncQueue.EnqueueSkippableTask(evt.Obj) } else { - glog.Warningf("unexpected event type received %T", event) + glog.Warningf("Unexpected event type received %T", event) } case <-n.stopCh: break @@ -354,12 +330,11 @@ func (n *NGINXController) Stop() error { n.stopLock.Lock() defer n.stopLock.Unlock() - // Only try draining the workqueue if we haven't already. 
if n.syncQueue.IsShuttingDown() { return fmt.Errorf("shutdown already in progress") } - glog.Infof("shutting down controller queues") + glog.Infof("Shutting down controller queues") close(n.stopCh) go n.syncQueue.Shutdown() if n.syncStatus != nil { @@ -368,7 +343,7 @@ func (n *NGINXController) Stop() error { // Send stop signal to Nginx glog.Info("stopping NGINX process...") - cmd := exec.Command(n.binary, "-c", cfgPath, "-s", "quit") + cmd := nginxExecCommand("-s", "quit") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Run() @@ -376,7 +351,7 @@ func (n *NGINXController) Stop() error { return err } - // Wait for the Nginx process disappear + // wait for the NGINX process to terminate timer := time.NewTicker(time.Second * 1) for range timer.C { if !process.IsNginxRunning() { @@ -393,7 +368,7 @@ func (n *NGINXController) start(cmd *exec.Cmd) { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Start(); err != nil { - glog.Fatalf("nginx error: %v", err) + glog.Fatalf("NGINX error: %v", err) n.ngxErrCh <- err return } @@ -416,18 +391,18 @@ func (n NGINXController) DefaultEndpoint() ingress.Endpoint { // running the command "nginx -t" using a temporal file. func (n NGINXController) testTemplate(cfg []byte) error { if len(cfg) == 0 { - return fmt.Errorf("invalid nginx configuration (empty)") + return fmt.Errorf("Invalid NGINX configuration (empty)") } tmpfile, err := ioutil.TempFile("", "nginx-cfg") if err != nil { return err } defer tmpfile.Close() - err = ioutil.WriteFile(tmpfile.Name(), cfg, 0644) + err = ioutil.WriteFile(tmpfile.Name(), cfg, file.ReadWriteByUser) if err != nil { return err } - out, err := exec.Command(n.binary, "-t", "-c", tmpfile.Name()).CombinedOutput() + out, err := nginxTestCommand(tmpfile.Name()).CombinedOutput() if err != nil { // this error is different from the rest because it must be clear why nginx is not working oe := fmt.Sprintf(` @@ -443,14 +418,10 @@ Error: %v return nil } -// OnUpdate is called periodically by syncQueue to keep the configuration in sync. -// -// 1. converts configmap configuration to custom configuration object -// 2. write the custom template (the complexity depends on the implementation) -// 3. write the configuration file -// -// returning nil implies the backend will be reloaded. -// if an error is returned means requeue the update +// OnUpdate is called by the synchronization loop whenever configuration +// changes were detected. The received backend Configuration is merged with the +// configuration ConfigMap before generating the final configuration file. +// Returns nil in case the backend was successfully reloaded. 
func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { cfg := n.store.GetBackendConfiguration() cfg.Resolver = n.resolver @@ -460,7 +431,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { for _, pb := range ingressCfg.PassthroughBackends { svc := pb.Service if svc == nil { - glog.Warningf("missing service for PassthroughBackends %v", pb.Backend) + glog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend) continue } port, err := strconv.Atoi(pb.Port.String()) @@ -480,7 +451,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { } } - //TODO: Allow PassthroughBackends to specify they support proxy-protocol + // TODO: Allow PassthroughBackends to specify they support proxy-protocol servers = append(servers, &TCPServer{ Hostname: pb.Hostname, IP: svc.Spec.ClusterIP, @@ -492,13 +463,6 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { n.Proxy.ServerList = servers } - // we need to check if the status module configuration changed - if cfg.EnableVtsStatus { - n.setupMonitor(vtsStatusModule) - } else { - n.setupMonitor(defaultStatusModule) - } - // NGINX cannot resize the hash tables used to store server names. // For this reason we check if the defined size defined is correct // for the FQDN defined in the ingress rules adjusting the value @@ -520,7 +484,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { } else { n = fmt.Sprintf("www.%v", srv.Hostname) } - glog.V(3).Infof("creating redirect from %v to %v", srv.Hostname, n) + glog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n) if _, ok := redirectServers[n]; !ok { found := false for _, esrv := range ingressCfg.Servers { @@ -537,24 +501,24 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { } if cfg.ServerNameHashBucketSize == 0 { nameHashBucketSize := nginxHashBucketSize(longestName) - glog.V(3).Infof("adjusting ServerNameHashBucketSize variable to %v", nameHashBucketSize) + glog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", nameHashBucketSize) cfg.ServerNameHashBucketSize = nameHashBucketSize } serverNameHashMaxSize := nextPowerOf2(serverNameBytes) if cfg.ServerNameHashMaxSize < serverNameHashMaxSize { - glog.V(3).Infof("adjusting ServerNameHashMaxSize variable to %v", serverNameHashMaxSize) + glog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize) cfg.ServerNameHashMaxSize = serverNameHashMaxSize } // the limit of open files is per worker process // and we leave some room to avoid consuming all the FDs available wp, err := strconv.Atoi(cfg.WorkerProcesses) - glog.V(3).Infof("number of worker processes: %v", wp) + glog.V(3).Infof("Number of worker processes: %d", wp) if err != nil { wp = 1 } maxOpenFiles := (sysctlFSFileMax() / wp) - 1024 - glog.V(2).Infof("maximum number of open file descriptors : %v", maxOpenFiles) + glog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles) if maxOpenFiles < 1024 { // this means the value of RLIMIT_NOFILE is too low.
maxOpenFiles = 1024 @@ -564,7 +528,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { if cfg.ProxySetHeaders != "" { cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders) if err != nil { - glog.Warningf("unexpected error reading configmap %v: %v", cfg.ProxySetHeaders, err) + glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err) } setHeaders = cmap.Data @@ -574,7 +538,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { if cfg.AddHeaders != "" { cmap, err := n.store.GetConfigMap(cfg.AddHeaders) if err != nil { - glog.Warningf("unexpected error reading configmap %v: %v", cfg.AddHeaders, err) + glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err) } addHeaders = cmap.Data @@ -586,7 +550,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { secret, err := n.store.GetSecret(secretName) if err != nil { - glog.Warningf("unexpected error reading secret %v: %v", secretName, err) + glog.Warningf("Error reading Secret %q from local store: %v", secretName, err) } nsSecName := strings.Replace(secretName, "/", "-", -1) @@ -595,7 +559,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { if ok { pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem) if err != nil { - glog.Warningf("unexpected error adding or updating dhparam %v file: %v", nsSecName, err) + glog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err) } else { sslDHParam = pemFileName } @@ -647,31 +611,28 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { return err } defer tmpfile.Close() - err = ioutil.WriteFile(tmpfile.Name(), content, 0644) + err = ioutil.WriteFile(tmpfile.Name(), content, file.ReadWriteByUser) if err != nil { return err } - // executing diff can return exit code != 0 + // TODO: executing diff can return exit code != 0 diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput() - glog.Infof("NGINX configuration diff\n") - glog.Infof("%v\n", string(diffOutput)) + glog.Infof("NGINX configuration diff:\n%v", string(diffOutput)) - // Do not use defer to remove the temporal file. - // This is helpful when there is an error in the - // temporal configuration (we can manually inspect the file). - // Only remove the file when no error occurred. + // we do not defer the deletion of temp files in order + // to keep them around for inspection in case of error os.Remove(tmpfile.Name()) } } - err = ioutil.WriteFile(cfgPath, content, 0644) + err = ioutil.WriteFile(cfgPath, content, file.ReadWriteByUser) if err != nil { return err } - o, err := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput() + o, err := nginxExecCommand("-s", "reload").CombinedOutput() if err != nil { return fmt.Errorf("%v\n%v", err, string(o)) } @@ -679,9 +640,10 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { return nil } -// nginxHashBucketSize computes the correct nginx hash_bucket_size for a hash with the given longest key +// nginxHashBucketSize computes the correct NGINX hash_bucket_size for a hash +// with the given longest key. 
func nginxHashBucketSize(longestString int) int { - // See https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation + // see https://github.com/kubernetes/ingress-nginxs/issues/623 for an explanation wordSize := 8 // Assume 64 bit CPU n := longestString + 2 aligned := (n + wordSize - 1) & ^(wordSize - 1) @@ -708,7 +670,7 @@ func (n *NGINXController) setupSSLProxy() { sslPort := n.cfg.ListenPorts.HTTPS proxyPort := n.cfg.ListenPorts.SSLProxy - glog.Info("starting TLS proxy for SSL passthrough") + glog.Info("Starting TLS proxy for SSL Passthrough") n.Proxy = &TCPProxy{ Default: &TCPServer{ Hostname: "localhost", @@ -725,32 +687,33 @@ func (n *NGINXController) setupSSLProxy() { proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout} - // start goroutine that accepts tcp connections in port 443 + // accept TCP connections on the configured HTTPS port go func() { for { var conn net.Conn var err error if n.store.GetBackendConfiguration().UseProxyProtocol { - // we need to wrap the listener in order to decode - // proxy protocol before handling the connection + // wrap the listener in order to decode Proxy + // Protocol before handling the connection conn, err = proxyList.Accept() } else { conn, err = listener.Accept() } if err != nil { - glog.Warningf("unexpected error accepting tcp connection: %v", err) + glog.Warningf("Error accepting TCP connection: %v", err) continue } - glog.V(3).Infof("remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) + glog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) go n.Proxy.Handle(conn) } }() } -// IsDynamicConfigurationEnough decides if the new configuration changes can be dynamically applied without reloading +// IsDynamicConfigurationEnough returns whether a Configuration can be +// dynamically applied, without reloading the backend. func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool { copyOfRunningConfig := *n.runningConfig copyOfPcfg := *pcfg @@ -761,8 +724,8 @@ func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configurati return copyOfRunningConfig.Equal(©OfPcfg) } -// configureDynamically JSON encodes new Backends and POSTs it to an internal HTTP endpoint -// that is handled by Lua +// configureDynamically encodes new Backends in JSON format and POSTs the +// payload to an internal HTTP endpoint handled by Lua. func configureDynamically(pcfg *ingress.Configuration, port int) error { backends := make([]*ingress.Backend, len(pcfg.Backends)) @@ -796,7 +759,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error { return err } - glog.V(2).Infof("posting backends configuration: %s", buf) + glog.V(2).Infof("Posting backends configuration: %s", buf) url := fmt.Sprintf("http://localhost:%d/configuration/backends", port) resp, err := http.Post(url, "application/json", bytes.NewReader(buf)) @@ -806,7 +769,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error { defer func() { if err := resp.Body.Close(); err != nil { - glog.Warningf("error while closing response body: \n%v", err) + glog.Warningf("Error while closing response body:\n%v", err) } }() diff --git a/internal/ingress/controller/stat_collector.go b/internal/ingress/controller/stat_collector.go deleted file mode 100644 index ad3434d15..000000000 --- a/internal/ingress/controller/stat_collector.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
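configureDynamically above JSON-encodes the backend list and POSTs it to the controller's internal /configuration/backends endpoint served by Lua. The sketch below reproduces just that HTTP call in isolation; the payload shape, port number, and status handling are illustrative assumptions, not the controller's actual types:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// postBackends JSON-encodes a backends payload and POSTs it to the internal
// endpoint handled by Lua, mirroring the call made in configureDynamically.
func postBackends(port int, backends interface{}) error {
	buf, err := json.Marshal(backends)
	if err != nil {
		return err
	}
	url := fmt.Sprintf("http://localhost:%d/configuration/backends", port)
	resp, err := http.Post(url, "application/json", bytes.NewReader(buf))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected HTTP status: %v", resp.Status)
	}
	return nil
}

func main() {
	// hypothetical payload and port; the real controller sends its ingress.Backend structs
	payload := []map[string]interface{}{{"name": "default-backend-8080", "endpoints": []string{}}}
	if err := postBackends(18080, payload); err != nil {
		fmt.Println("posting backends failed:", err)
	}
}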
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - - "k8s.io/ingress-nginx/internal/ingress/controller/metric/collector" -) - -const ( - ngxStatusPath = "/nginx_status" - ngxVtsPath = "/nginx_status/format/json" -) - -func (n *NGINXController) setupMonitor(sm statusModule) { - csm := n.statusModule - if csm != sm { - glog.Infof("changing prometheus collector from %v to %v", csm, sm) - n.stats.stop(csm) - n.stats.start(sm) - n.statusModule = sm - } -} - -type statsCollector struct { - process prometheus.Collector - basic collector.Stopable - vts collector.Stopable - - namespace string - watchClass string - - port int -} - -func (s *statsCollector) stop(sm statusModule) { - switch sm { - case defaultStatusModule: - s.basic.Stop() - prometheus.Unregister(s.basic) - case vtsStatusModule: - s.vts.Stop() - prometheus.Unregister(s.vts) - } -} - -func (s *statsCollector) start(sm statusModule) { - switch sm { - case defaultStatusModule: - s.basic = collector.NewNginxStatus(s.namespace, s.watchClass, s.port, ngxStatusPath) - prometheus.Register(s.basic) - break - case vtsStatusModule: - s.vts = collector.NewNGINXVTSCollector(s.namespace, s.watchClass, s.port, ngxVtsPath) - prometheus.Register(s.vts) - break - } -} - -func newStatsCollector(ns, class, binary string, port int) *statsCollector { - glog.Infof("starting new nginx stats collector for Ingress controller running in namespace %v (class %v)", ns, class) - glog.Infof("collector extracting information from port %v", port) - pc, err := collector.NewNamedProcess(true, collector.BinaryNameMatcher{ - Name: "nginx", - Binary: binary, - }) - if err != nil { - glog.Fatalf("unexpected error registering nginx collector: %v", err) - } - err = prometheus.Register(pc) - if err != nil { - glog.Fatalf("unexpected error registering nginx collector: %v", err) - } - - return &statsCollector{ - namespace: ns, - watchClass: class, - process: pc, - port: port, - } -} diff --git a/internal/ingress/controller/store/store.go b/internal/ingress/controller/store/store.go index ac4cceab2..6452c1bbc 100644 --- a/internal/ingress/controller/store/store.go +++ b/internal/ingress/controller/store/store.go @@ -479,6 +479,18 @@ func New(checkOCSP bool, if key == configmap { store.setConfig(cm) } + + ings := store.listers.IngressAnnotation.List() + for _, ingKey := range ings { + key := k8s.MetaNamespaceKey(ingKey) + ing, err := store.GetIngress(key) + if err != nil { + glog.Errorf("could not find Ingress %v in local store: %v", key, err) + continue + } + store.extractAnnotations(ing) + } + updateCh.In() <- Event{ Type: ConfigurationEvent, Obj: cur, @@ -494,6 +506,13 @@ func New(checkOCSP bool, store.informers.ConfigMap.AddEventHandler(cmEventHandler) store.informers.Service.AddEventHandler(cache.ResourceEventHandlerFuncs{}) + // do not wait for informers to read the configmap configuration + cm, err := client.CoreV1().ConfigMaps(namespace).Get(configmap, metav1.GetOptions{}) + if err 
!= nil { + glog.Warningf("Unexpected error reading configuration configmap: %v", err) + } + store.setConfig(cm) + return store } @@ -699,7 +718,7 @@ func (s *k8sStore) setConfig(cmap *corev1.ConfigMap) { glog.Warningf("unexpected error decoding key ssl-session-ticket-key: %v", err) s.backendConfig.SSLSessionTicketKey = "" } - ioutil.WriteFile("/etc/nginx/tickets.key", d, 0644) + ioutil.WriteFile("/etc/nginx/tickets.key", d, file.ReadWriteByUser) } } diff --git a/internal/ingress/controller/tcp.go b/internal/ingress/controller/tcp.go index 4b3f1a36d..cfaca7b20 100644 --- a/internal/ingress/controller/tcp.go +++ b/internal/ingress/controller/tcp.go @@ -26,7 +26,7 @@ import ( "github.com/paultag/sniff/parser" ) -// TCPServer describes a server that works in passthrough mode +// TCPServer describes a server that works in passthrough mode. type TCPServer struct { Hostname string IP string @@ -34,13 +34,13 @@ type TCPServer struct { ProxyProtocol bool } -// TCPProxy describes the passthrough servers and a default as catch all +// TCPProxy describes the passthrough servers and a default as catch all. type TCPProxy struct { ServerList []*TCPServer Default *TCPServer } -// Get returns the TCPServer to use +// Get returns the TCPServer to use for a given host. func (p *TCPProxy) Get(host string) *TCPServer { if p.ServerList == nil { return p.Default @@ -63,19 +63,19 @@ func (p *TCPProxy) Handle(conn net.Conn) { length, err := conn.Read(data) if err != nil { - glog.V(4).Infof("error reading the first 4k of the connection: %s", err) + glog.V(4).Infof("Error reading the first 4k of the connection: %s", err) return } proxy := p.Default hostname, err := parser.GetHostname(data[:]) if err == nil { - glog.V(4).Infof("parsed hostname from TLS Client Hello: %s", hostname) + glog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname) proxy = p.Get(hostname) } if proxy == nil { - glog.V(4).Infof("there is no configured proxy for SSL connections") + glog.V(4).Infof("There is no configured proxy for SSL connections.") return } @@ -86,7 +86,7 @@ func (p *TCPProxy) Handle(conn net.Conn) { defer clientConn.Close() if proxy.ProxyProtocol { - //Write out the proxy-protocol header + // write out the Proxy Protocol header localAddr := conn.LocalAddr().(*net.TCPAddr) remoteAddr := conn.RemoteAddr().(*net.TCPAddr) protocol := "UNKNOWN" @@ -96,16 +96,16 @@ func (p *TCPProxy) Handle(conn net.Conn) { protocol = "TCP6" } proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port) - glog.V(4).Infof("Writing proxy protocol header - %s", proxyProtocolHeader) + glog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader) _, err = fmt.Fprintf(clientConn, proxyProtocolHeader) } if err != nil { - glog.Errorf("unexpected error writing proxy-protocol header: %s", err) + glog.Errorf("Error writing Proxy Protocol header: %s", err) clientConn.Close() } else { _, err = clientConn.Write(data[:length]) if err != nil { - glog.Errorf("unexpected error writing first 4k of proxy data: %s", err) + glog.Errorf("Error writing the first 4k of proxy data: %s", err) clientConn.Close() } } diff --git a/internal/ingress/controller/template/configmap.go b/internal/ingress/controller/template/configmap.go index 8092ef5c0..6f3a1b825 100644 --- a/internal/ingress/controller/template/configmap.go +++ b/internal/ingress/controller/template/configmap.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" + "github.com/mitchellh/hashstructure" 
"github.com/mitchellh/mapstructure" "k8s.io/apimachinery/pkg/util/sets" @@ -191,6 +192,15 @@ func ReadConfig(src map[string]string) config.Configuration { glog.Warningf("unexpected error merging defaults: %v", err) } + hash, err := hashstructure.Hash(to, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + glog.Warningf("unexpected error obtaining hash: %v", err) + } + + to.Checksum = fmt.Sprintf("%v", hash) + return to } diff --git a/internal/ingress/controller/template/configmap_test.go b/internal/ingress/controller/template/configmap_test.go index 987d76125..e503c8654 100644 --- a/internal/ingress/controller/template/configmap_test.go +++ b/internal/ingress/controller/template/configmap_test.go @@ -17,11 +17,13 @@ limitations under the License. package template import ( + "fmt" "reflect" "testing" "time" "github.com/kylelemons/godebug/pretty" + "github.com/mitchellh/hashstructure" "k8s.io/ingress-nginx/internal/ingress/controller/config" ) @@ -88,6 +90,14 @@ func TestMergeConfigMapToStruct(t *testing.T) { def.NginxStatusIpv6Whitelist = []string{"::1", "2001::/16"} def.ProxyAddOriginalUriHeader = false + hash, err := hashstructure.Hash(def, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + t.Fatalf("unexpected error obtaining hash: %v", err) + } + def.Checksum = fmt.Sprintf("%v", hash) + to := ReadConfig(conf) if diff := pretty.Compare(to, def); diff != "" { t.Errorf("unexpected diff: (-got +want)\n%s", diff) @@ -107,6 +117,14 @@ func TestMergeConfigMapToStruct(t *testing.T) { } def = config.NewDefault() + hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + t.Fatalf("unexpected error obtaining hash: %v", err) + } + def.Checksum = fmt.Sprintf("%v", hash) + to = ReadConfig(map[string]string{}) if diff := pretty.Compare(to, def); diff != "" { t.Errorf("unexpected diff: (-got +want)\n%s", diff) @@ -114,6 +132,15 @@ func TestMergeConfigMapToStruct(t *testing.T) { def = config.NewDefault() def.WhitelistSourceRange = []string{"1.1.1.1/32"} + + hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{ + TagName: "json", + }) + if err != nil { + t.Fatalf("unexpected error obtaining hash: %v", err) + } + def.Checksum = fmt.Sprintf("%v", hash) + to = ReadConfig(map[string]string{ "whitelist-source-range": "1.1.1.1/32", }) diff --git a/internal/ingress/controller/util.go b/internal/ingress/controller/util.go index 195842679..28005bd84 100644 --- a/internal/ingress/controller/util.go +++ b/internal/ingress/controller/util.go @@ -17,6 +17,8 @@ limitations under the License. package controller import ( + "os" + "os/exec" "syscall" "github.com/golang/glog" @@ -41,29 +43,53 @@ func newUpstream(name string) *ingress.Backend { } } -// sysctlSomaxconn returns the value of net.core.somaxconn, i.e. -// maximum number of connections that can be queued for acceptance +// sysctlSomaxconn returns the maximum number of connections that can be queued +// for acceptance (value of net.core.somaxconn) // http://nginx.org/en/docs/http/ngx_http_core_module.html#listen func sysctlSomaxconn() int { maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn") if err != nil || maxConns < 512 { - glog.V(3).Infof("system net.core.somaxconn=%v (using system default)", maxConns) + glog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns) return 511 } return maxConns } -// sysctlFSFileMax returns the value of fs.file-max, i.e. 
-// maximum number of open file descriptors +// sysctlFSFileMax returns the maximum number of open file descriptors (value +// of fs.file-max) or 0 in case of error. func sysctlFSFileMax() int { var rLimit syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) if err != nil { - glog.Errorf("unexpected error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err) - // returning 0 means don't render the value + glog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err) return 0 } glog.V(2).Infof("rlimit.max=%v", rLimit.Max) return int(rLimit.Max) } + +const ( + defBinary = "/usr/sbin/nginx" + cfgPath = "/etc/nginx/nginx.conf" +) + +func nginxExecCommand(args ...string) *exec.Cmd { + ngx := os.Getenv("NGINX_BINARY") + if ngx == "" { + ngx = defBinary + } + + cmdArgs := []string{"-c", cfgPath} + cmdArgs = append(cmdArgs, args...) + return exec.Command(ngx, cmdArgs...) +} + +func nginxTestCommand(cfg string) *exec.Cmd { + ngx := os.Getenv("NGINX_BINARY") + if ngx == "" { + ngx = defBinary + } + + return exec.Command(ngx, "-c", cfg, "-t") +} diff --git a/internal/ingress/metric/collector/collector.go b/internal/ingress/metric/collector/collector.go new file mode 100644 index 000000000..697923764 --- /dev/null +++ b/internal/ingress/metric/collector/collector.go @@ -0,0 +1,296 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collector + +import ( + "encoding/json" + "net" + "strings" + "time" + + "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" +) + +type socketData struct { + Host string `json:"host"` // Label + Status string `json:"status"` // Label + + RealIPAddress string `json:"realIpAddr"` // Label + RemoteAddress string `json:"remoteAddr"` // Label + RemoteUser string `json:"remoteUser"` // Label + + BytesSent float64 `json:"bytesSent"` // Metric + + Protocol string `json:"protocol"` // Label + Method string `json:"method"` // Label + URI string `json:"uri"` // Label + + RequestLength float64 `json:"requestLength"` // Metric + RequestTime float64 `json:"requestTime"` // Metric + + UpstreamName string `json:"upstreamName"` // Label + UpstreamIP string `json:"upstreamIP"` // Label + UpstreamResponseTime float64 `json:"upstreamResponseTime"` // Metric + UpstreamStatus string `json:"upstreamStatus"` // Label + + Namespace string `json:"namespace"` // Label + Ingress string `json:"ingress"` // Label + Service string `json:"service"` // Label +} + +// SocketCollector stores prometheus metrics and ingress meta-data +type SocketCollector struct { + upstreamResponseTime *prometheus.HistogramVec + requestTime *prometheus.HistogramVec + requestLength *prometheus.HistogramVec + bytesSent *prometheus.HistogramVec + collectorSuccess *prometheus.GaugeVec + collectorSuccessTime *prometheus.GaugeVec + requests *prometheus.CounterVec + listener net.Listener + ns string + ingressClass string +} + +// NewInstance creates a new SocketCollector instance +func NewInstance(ns string, class string) error { + sc := SocketCollector{} + + ns = strings.Replace(ns, "-", "_", -1) + + listener, err := net.Listen("unix", "/tmp/prometheus-nginx.socket") + if err != nil { + return err + } + + sc.listener = listener + sc.ns = ns + sc.ingressClass = class + + requestTags := []string{"host", "status", "remote_address", "real_ip_address", "remote_user", "protocol", "method", "uri", "upstream_name", "upstream_ip", "upstream_status", "namespace", "ingress", "service"} + collectorTags := []string{"namespace", "ingress_class"} + + sc.upstreamResponseTime = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "upstream_response_time_seconds", + Help: "The time spent on receiving the response from the upstream server", + Namespace: ns, + }, + requestTags, + ) + + sc.requestTime = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "The request processing time in seconds", + Namespace: ns, + }, + requestTags, + ) + + sc.requestLength = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_length_bytes", + Help: "The request length (including request line, header, and request body)", + Namespace: ns, + Buckets: prometheus.LinearBuckets(10, 10, 10), // 10 buckets, each 10 bytes wide. + }, + requestTags, + ) + + sc.requests = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "requests", + Help: "The total number of client requests.", + Namespace: ns, + }, + collectorTags, + ) + + sc.bytesSent = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "bytes_sent", + Help: "The number of bytes sent to a client", + Namespace: ns, + Buckets: prometheus.ExponentialBuckets(10, 10, 7), // 7 buckets, exponential factor of 10.
+ }, + requestTags, + ) + + sc.collectorSuccess = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "collector_last_run_successful", + Help: "Whether the last collector run was successful (success = 1, failure = 0).", + Namespace: ns, + }, + collectorTags, + ) + + sc.collectorSuccessTime = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "collector_last_run_successful_timestamp_seconds", + Help: "Timestamp of the last successful collector run", + Namespace: ns, + }, + collectorTags, + ) + + prometheus.MustRegister(sc.upstreamResponseTime) + prometheus.MustRegister(sc.requestTime) + prometheus.MustRegister(sc.requestLength) + prometheus.MustRegister(sc.requests) + prometheus.MustRegister(sc.bytesSent) + prometheus.MustRegister(sc.collectorSuccess) + prometheus.MustRegister(sc.collectorSuccessTime) + + go sc.Run() + + return nil +} + +func (sc *SocketCollector) handleMessage(msg []byte) { + glog.V(5).Infof("msg: %v", string(msg)) + + collectorSuccess := true + + // Unmarshal bytes + var stats socketData + err := json.Unmarshal(msg, &stats) + if err != nil { + glog.Errorf("Unexpected error deserializing JSON payload: %v", err) + collectorSuccess = false + return + } + + // Create Request Labels Map + requestLabels := prometheus.Labels{ + "host": stats.Host, + "status": stats.Status, + "remote_address": stats.RemoteAddress, + "real_ip_address": stats.RealIPAddress, + "remote_user": stats.RemoteUser, + "protocol": stats.Protocol, + "method": stats.Method, + "uri": stats.URI, + "upstream_name": stats.UpstreamName, + "upstream_ip": stats.UpstreamIP, + "upstream_status": stats.UpstreamStatus, + "namespace": stats.Namespace, + "ingress": stats.Ingress, + "service": stats.Service, + } + + // Create Collector Labels Map + collectorLabels := prometheus.Labels{ + "namespace": sc.ns, + "ingress_class": sc.ingressClass, + } + + // Emit metrics + requestsMetric, err := sc.requests.GetMetricWith(collectorLabels) + if err != nil { + glog.Errorf("Error fetching requests metric: %v", err) + collectorSuccess = false + } else { + requestsMetric.Inc() + } + + if stats.UpstreamResponseTime != -1 { + upstreamResponseTimeMetric, err := sc.upstreamResponseTime.GetMetricWith(requestLabels) + if err != nil { + glog.Errorf("Error fetching upstream response time metric: %v", err) + collectorSuccess = false + } else { + upstreamResponseTimeMetric.Observe(stats.UpstreamResponseTime) + } + } + + if stats.RequestTime != -1 { + requestTimeMetric, err := sc.requestTime.GetMetricWith(requestLabels) + if err != nil { + glog.Errorf("Error fetching request duration metric: %v", err) + collectorSuccess = false + } else { + requestTimeMetric.Observe(stats.RequestTime) + } + } + + if stats.RequestLength != -1 { + requestLengthMetric, err := sc.requestLength.GetMetricWith(requestLabels) + if err != nil { + glog.Errorf("Error fetching request length metric: %v", err) + collectorSuccess = false + } else { + requestLengthMetric.Observe(stats.RequestLength) + } + } + + if stats.BytesSent != -1 { + bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels) + if err != nil { + glog.Errorf("Error fetching bytes sent metric: %v", err) + collectorSuccess = false + } else { + bytesSentMetric.Observe(stats.BytesSent) + } + } + + collectorSuccessMetric, err := sc.collectorSuccess.GetMetricWith(collectorLabels) + if err != nil { + glog.Errorf("Error fetching collector success metric: %v", err) + } else { + if collectorSuccess { + collectorSuccessMetric.Set(1) + collectorSuccessTimeMetric, err :=
sc.collectorSuccessTime.GetMetricWith(collectorLabels) + if err != nil { + glog.Errorf("Error fetching collector success time metric: %v", err) + } else { + collectorSuccessTimeMetric.Set(float64(time.Now().Unix())) + } + } else { + collectorSuccessMetric.Set(0) + } + } +} + +// Run listens for connections on the unix socket and spawns a goroutine to process the content +func (sc *SocketCollector) Run() { + for { + conn, err := sc.listener.Accept() + if err != nil { + continue + } + + go handleMessages(conn, sc.handleMessage) + } +} + +const packetSize = 1024 * 65 + +// handleMessages processes the content received on a network connection +func handleMessages(conn net.Conn, fn func([]byte)) { + defer conn.Close() + + msg := make([]byte, packetSize) + s, err := conn.Read(msg[0:]) + if err != nil { + return + } + + fn(msg[0:s]) +} diff --git a/internal/ingress/metric/collector/collector_test.go b/internal/ingress/metric/collector/collector_test.go new file mode 100644 index 000000000..d5544924c --- /dev/null +++ b/internal/ingress/metric/collector/collector_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collector + +import ( + "fmt" + "net" + "sync/atomic" + "testing" + "time" +) + +func TestNewUDPLogListener(t *testing.T) { + var count uint64 + + fn := func(message []byte) { + t.Logf("message: %v", string(message)) + atomic.AddUint64(&count, 1) + } + + tmpFile := fmt.Sprintf("/tmp/test-socket-%v", time.Now().Nanosecond()) + + l, err := net.Listen("unix", tmpFile) + if err != nil { + t.Fatalf("unexpected error creating unix socket: %v", err) + } + if l == nil { + t.Fatalf("expected a listener but none returned") + } + + defer l.Close() + + go func() { + for { + conn, err := l.Accept() + if err != nil { + continue + } + + go handleMessages(conn, fn) + } + }() + + conn, _ := net.Dial("unix", tmpFile) + conn.Write([]byte("message")) + conn.Close() + + time.Sleep(1 * time.Millisecond) + if count != 1 { + t.Errorf("expected only one message from the UDP listener but %v returned", count) + } +} diff --git a/internal/ingress/controller/metric/collector/nginx.go b/internal/ingress/metric/collector/nginx_status_collector.go similarity index 53% rename from internal/ingress/controller/metric/collector/nginx.go rename to internal/ingress/metric/collector/nginx_status_collector.go index 74d47a357..156dc3e70 100644 --- a/internal/ingress/controller/metric/collector/nginx.go +++ b/internal/ingress/metric/collector/nginx_status_collector.go @@ -17,16 +17,30 @@ limitations under the License.
package collector import ( + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" ) +var ( + ac = regexp.MustCompile(`Active connections: (\d+)`) + sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`) + reading = regexp.MustCompile(`Reading: (\d+)`) + writing = regexp.MustCompile(`Writing: (\d+)`) + waiting = regexp.MustCompile(`Waiting: (\d+)`) +) + type ( nginxStatusCollector struct { scrapeChan chan scrapeRequest ngxHealthPort int - ngxVtsPath string + ngxStatusPath string data *nginxStatusData watchNamespace string ingressClass string @@ -37,15 +51,33 @@ type ( requestsTotal *prometheus.Desc connections *prometheus.Desc } + + basicStatus struct { + // Active total number of active connections + Active int + // Accepted total number of accepted client connections + Accepted int + // Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit). + Handled int + // Requests total number of client requests. + Requests int + // Reading current number of connections where nginx is reading the request header. + Reading int + // Writing current number of connections where nginx is writing the response back to the client. + Writing int + // Waiting current number of idle client connections waiting for a request. + Waiting int + } ) -// NewNginxStatus returns a new prometheus collector the default nginx status module -func NewNginxStatus(watchNamespace, ingressClass string, ngxHealthPort int, ngxVtsPath string) Stopable { - +// InitNGINXStatusCollector returns a new prometheus collector the default nginx status module +func InitNGINXStatusCollector(watchNamespace, ingressClass string, ngxHealthPort int) error { + const ns string = "nginx" + const ngxStatusPath = "/nginx_status" p := nginxStatusCollector{ scrapeChan: make(chan scrapeRequest), ngxHealthPort: ngxHealthPort, - ngxVtsPath: ngxVtsPath, + ngxStatusPath: ngxStatusPath, watchNamespace: watchNamespace, ingressClass: ingressClass, } @@ -62,14 +94,20 @@ func NewNginxStatus(watchNamespace, ingressClass string, ngxHealthPort int, ngxV []string{"ingress_class", "namespace"}, nil), connections: prometheus.NewDesc( - prometheus.BuildFQName(ns, "", "connnections"), + prometheus.BuildFQName(ns, "", "connections"), "current number of client connections with state {reading, writing, waiting}", []string{"ingress_class", "namespace", "state"}, nil), } - go p.start() + err := prometheus.Register(p) - return p + if err != nil { + return fmt.Errorf("error while registering nginx status collector : %v", err) + } + + go p.Run() + + return nil } // Describe implements prometheus.Collector. 
@@ -86,7 +124,7 @@ func (p nginxStatusCollector) Collect(ch chan<- prometheus.Metric) { <-req.done } -func (p nginxStatusCollector) start() { +func (p nginxStatusCollector) Run() { for req := range p.scrapeChan { ch := req.results p.scrape(ch) @@ -98,9 +136,71 @@ func (p nginxStatusCollector) Stop() { close(p.scrapeChan) } +func httpBody(url string) ([]byte, error) { + resp, err := http.DefaultClient.Get(url) + if err != nil { + return nil, fmt.Errorf("unexpected error scraping nginx : %v", err) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err) + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode) + } + + return data, nil +} + +func toInt(data []string, pos int) int { + if len(data) == 0 { + return 0 + } + if pos > len(data) { + return 0 + } + if v, err := strconv.Atoi(data[pos]); err == nil { + return v + } + return 0 +} + +func parse(data string) *basicStatus { + acr := ac.FindStringSubmatch(data) + sahrr := sahr.FindStringSubmatch(data) + readingr := reading.FindStringSubmatch(data) + writingr := writing.FindStringSubmatch(data) + waitingr := waiting.FindStringSubmatch(data) + + return &basicStatus{ + toInt(acr, 1), + toInt(sahrr, 1), + toInt(sahrr, 2), + toInt(sahrr, 3), + toInt(readingr, 1), + toInt(writingr, 1), + toInt(waitingr, 1), + } +} + +func getNginxStatus(port int, path string) (*basicStatus, error) { + url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) + glog.V(3).Infof("start scraping url: %v", url) + + data, err := httpBody(url) + + if err != nil { + return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err) + } + + return parse(string(data)), nil +} + // nginxStatusCollector scrape the nginx status func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) { - s, err := getNginxStatus(p.ngxHealthPort, p.ngxVtsPath) + s, err := getNginxStatus(p.ngxHealthPort, p.ngxStatusPath) if err != nil { glog.Warningf("unexpected error obtaining nginx status info: %v", err) return diff --git a/internal/ingress/controller/metric/collector/process.go b/internal/ingress/metric/collector/process_collector.go similarity index 92% rename from internal/ingress/controller/metric/collector/process.go rename to internal/ingress/metric/collector/process_collector.go index 016798525..679b9149f 100644 --- a/internal/ingress/controller/metric/collector/process.go +++ b/internal/ingress/metric/collector/process_collector.go @@ -26,6 +26,17 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +type scrapeRequest struct { + results chan<- prometheus.Metric + done chan struct{} +} + +// Stopable defines a prometheus collector that can be stopped +type Stopable interface { + prometheus.Collector + Stop() +} + // BinaryNameMatcher ... 
type BinaryNameMatcher struct { Name string @@ -60,8 +71,8 @@ type namedProcess struct { data namedProcessData } -// NewNamedProcess returns a new prometheus collector for the nginx process -func NewNamedProcess(children bool, mn common.MatchNamer) (prometheus.Collector, error) { +// newNamedProcess returns a new prometheus collector for the nginx process +func newNamedProcess(children bool, mn common.MatchNamer) (prometheus.Collector, error) { fs, err := proc.NewFS("/proc") if err != nil { return nil, err diff --git a/internal/ingress/status/status.go b/internal/ingress/status/status.go index 1622cbc35..6bb8556ba 100644 --- a/internal/ingress/status/status.go +++ b/internal/ingress/status/status.go @@ -32,7 +32,6 @@ import ( extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/leaderelection" @@ -183,11 +182,6 @@ func NewStatusSyncer(config Config) Sync { OnStartedLeading: func(stop <-chan struct{}) { glog.V(2).Infof("I am the new status update leader") go st.syncQueue.Run(time.Second, stop) - wait.PollUntil(updateInterval, func() (bool, error) { - // send a dummy object to the queue to force a sync - st.syncQueue.Enqueue("sync status") - return false, nil - }, stop) }, OnStoppedLeading: func() { glog.V(2).Infof("I am not status update leader anymore") diff --git a/internal/ingress/types.go b/internal/ingress/types.go index 28035a963..e265df373 100644 --- a/internal/ingress/types.go +++ b/internal/ingress/types.go @@ -63,6 +63,9 @@ type Configuration struct { // It contains information about the associated Server Name Indication (SNI). // +optional PassthroughBackends []*SSLPassthroughBackend `json:"passthroughBackends,omitempty"` + + // ConfigurationChecksum contains the particular checksum of a Configuration object + ConfigurationChecksum string `json:"configurationChecksum,omitempty"` } // Backend describes one or more remote server/s (endpoints) associated with a service @@ -230,10 +233,6 @@ type Location struct { // UsePortInRedirects indicates if redirects must specify the port // +optional UsePortInRedirects bool `json:"usePortInRedirects"` - // VtsFilterKey contains the vts filter key on the location level - // https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key - // +optional - VtsFilterKey string `json:"vtsFilterKey,omitempty"` // ConfigurationSnippet contains additional configuration for the backend // to be considered in the configuration of the location ConfigurationSnippet string `json:"configurationSnippet"` diff --git a/internal/ingress/types_equals.go b/internal/ingress/types_equals.go index b7be64408..4c4aac2c8 100644 --- a/internal/ingress/types_equals.go +++ b/internal/ingress/types_equals.go @@ -104,6 +104,10 @@ func (c1 *Configuration) Equal(c2 *Configuration) bool { } } + if c1.ConfigurationChecksum != c2.ConfigurationChecksum { + return false + } + return true } @@ -256,28 +260,34 @@ func (s1 *Server) Equal(s2 *Server) bool { if s1.Hostname != s2.Hostname { return false } - if s1.Alias != s2.Alias { - return false - } if s1.SSLPassthrough != s2.SSLPassthrough { return false } if !(&s1.SSLCert).Equal(&s2.SSLCert) { return false } - if !(&s1.CertificateAuth).Equal(&s2.CertificateAuth) { + if s1.Alias != s2.Alias { return false } if s1.RedirectFromToWWW != s2.RedirectFromToWWW { return false } - - if len(s1.Locations) != 
len(s2.Locations) { + if !(&s1.CertificateAuth).Equal(&s2.CertificateAuth) { + return false + } + if s1.ServerSnippet != s2.ServerSnippet { return false } if s1.SSLCiphers != s2.SSLCiphers { return false } + if s1.AuthTLSError != s2.AuthTLSError { + return false + } + + if len(s1.Locations) != len(s2.Locations) { + return false + } // Location are sorted for idx, s1l := range s1.Locations { diff --git a/internal/net/dns/dns_test.go b/internal/net/dns/dns_test.go index 979d65c32..bd2243ae7 100644 --- a/internal/net/dns/dns_test.go +++ b/internal/net/dns/dns_test.go @@ -21,6 +21,8 @@ import ( "net" "os" "testing" + + "k8s.io/ingress-nginx/internal/file" ) func TestGetDNSServers(t *testing.T) { @@ -32,22 +34,22 @@ func TestGetDNSServers(t *testing.T) { t.Error("expected at least 1 nameserver in /etc/resolv.conf") } - file, err := ioutil.TempFile("", "fw") + f, err := ioutil.TempFile("", "fw") if err != nil { t.Fatalf("unexpected error: %v", err) } - defer file.Close() - defer os.Remove(file.Name()) + defer f.Close() + defer os.Remove(f.Name()) - ioutil.WriteFile(file.Name(), []byte(` + ioutil.WriteFile(f.Name(), []byte(` # comment ; comment nameserver 2001:4860:4860::8844 nameserver 2001:4860:4860::8888 nameserver 8.8.8.8 - `), 0644) + `), file.ReadWriteByUser) - defResolvConf = file.Name() + defResolvConf = f.Name() s, err = GetSystemNameServers() if err != nil { t.Fatalf("unexpected error reading /etc/resolv.conf file: %v", err) diff --git a/internal/task/queue.go b/internal/task/queue.go index 3b4c0e41c..4c82a6024 100644 --- a/internal/task/queue.go +++ b/internal/task/queue.go @@ -22,6 +22,7 @@ import ( "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -50,23 +51,39 @@ type Queue struct { // Element represents one item of the queue type Element struct { - Key interface{} - Timestamp int64 + Key interface{} + Timestamp int64 + IsSkippable bool } -// Run ... +// Run starts processing elements in the queue func (t *Queue) Run(period time.Duration, stopCh <-chan struct{}) { wait.Until(t.worker, period, stopCh) } -// Enqueue enqueues ns/name of the given api object in the task queue. -func (t *Queue) Enqueue(obj interface{}) { +// EnqueueTask enqueues ns/name of the given api object in the task queue. +func (t *Queue) EnqueueTask(obj interface{}) { + t.enqueue(obj, false) +} + +// EnqueueSkippableTask enqueues ns/name of the given api object in +// the task queue that can be skipped +func (t *Queue) EnqueueSkippableTask(obj interface{}) { + t.enqueue(obj, true) +} + +// enqueue enqueues ns/name of the given api object in the task queue. 
+func (t *Queue) enqueue(obj interface{}, skippable bool) { if t.IsShuttingDown() { glog.Errorf("queue has been shutdown, failed to enqueue: %v", obj) return } ts := time.Now().UnixNano() + if !skippable { + // make sure the timestamp is bigger than lastSync + ts = time.Now().Add(24 * time.Hour).UnixNano() + } glog.V(3).Infof("queuing item %v", obj) key, err := t.fn(obj) if err != nil { @@ -166,3 +183,10 @@ func NewCustomTaskQueue(syncFn func(interface{}) error, fn func(interface{}) (in return q } + +// GetDummyObject returns a valid object that can be used in the Queue +func GetDummyObject(name string) *metav1.ObjectMeta { + return &metav1.ObjectMeta{ + Name: name, + } +} diff --git a/internal/task/queue_test.go b/internal/task/queue_test.go index 6cc1f3508..52dab7c20 100644 --- a/internal/task/queue_test.go +++ b/internal/task/queue_test.go @@ -71,7 +71,7 @@ func TestEnqueueSuccess(t *testing.T) { k: "testKey", v: "testValue", } - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) if atomic.LoadUint32(&sr) != 1 { @@ -99,7 +99,7 @@ func TestEnqueueFailed(t *testing.T) { q.Shutdown() // wait for shutdown time.Sleep(time.Millisecond * 10) - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) // queue is shutdown, so mockSynFn should not be executed, so the result should be 0 @@ -121,7 +121,7 @@ func TestEnqueueKeyError(t *testing.T) { v: "testValue", } - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) // key error, so the result should be 0 @@ -142,16 +142,16 @@ func TestSkipEnqueue(t *testing.T) { k: "testKey", v: "testValue", } - q.Enqueue(mo) - q.Enqueue(mo) - q.Enqueue(mo) - q.Enqueue(mo) + q.EnqueueSkippableTask(mo) + q.EnqueueSkippableTask(mo) + q.EnqueueTask(mo) + q.EnqueueSkippableTask(mo) // run queue go q.Run(time.Second, stopCh) // wait for 'mockSynFn' time.Sleep(time.Millisecond * 10) - if atomic.LoadUint32(&sr) != 1 { - t.Errorf("sr should be 1, but is %d", sr) + if atomic.LoadUint32(&sr) != 2 { + t.Errorf("sr should be 2, but is %d", sr) } // shutdown queue before exit diff --git a/internal/watch/dummy.go b/internal/watch/dummy.go deleted file mode 100644 index eb9874a32..000000000 --- a/internal/watch/dummy.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package watch - -// DummyFileWatcher noop implementation of a file watcher -type DummyFileWatcher struct{} - -// NewDummyFileWatcher creates a FileWatcher using the DummyFileWatcher -func NewDummyFileWatcher(file string, onEvent func()) FileWatcher { - return DummyFileWatcher{} -} - -// Close ends the watch -func (f DummyFileWatcher) Close() error { - return nil -} diff --git a/internal/watch/file_watcher_test.go b/internal/watch/file_watcher_test.go index 5733cd07c..83a37ea0b 100644 --- a/internal/watch/file_watcher_test.go +++ b/internal/watch/file_watcher_test.go @@ -21,6 +21,8 @@ import ( "os" "testing" "time" + + "k8s.io/ingress-nginx/internal/file" ) func prepareTimeout() chan bool { @@ -33,15 +35,15 @@ func prepareTimeout() chan bool { } func TestFileWatcher(t *testing.T) { - file, err := ioutil.TempFile("", "fw") + f, err := ioutil.TempFile("", "fw") if err != nil { t.Fatalf("unexpected error: %v", err) } - defer file.Close() - defer os.Remove(file.Name()) + defer f.Close() + defer os.Remove(f.Name()) count := 0 events := make(chan bool, 10) - fw, err := NewFileWatcher(file.Name(), func() { + fw, err := NewFileWatcher(f.Name(), func() { count++ if count != 1 { t.Fatalf("expected 1 but returned %v", count) @@ -58,7 +60,7 @@ func TestFileWatcher(t *testing.T) { t.Fatalf("expected no events before writing a file") case <-timeoutChan: } - ioutil.WriteFile(file.Name(), []byte{}, 0644) + ioutil.WriteFile(f.Name(), []byte{}, file.ReadWriteByUser) select { case <-events: case <-timeoutChan: diff --git a/rootfs/Dockerfile b/rootfs/Dockerfile index 4f438d556..a6c702021 100644 --- a/rootfs/Dockerfile +++ b/rootfs/Dockerfile @@ -20,15 +20,37 @@ WORKDIR /etc/nginx RUN clean-install \ diffutils \ - dumb-init + dumb-init \ + libcap2-bin + +COPY . / + +RUN setcap cap_net_bind_service=+ep /usr/sbin/nginx \ + && setcap cap_net_bind_service=+ep /nginx-ingress-controller + +RUN bash -eux -c ' \ + writeDirs=( \ + /etc/nginx \ + /etc/ingress-controller/ssl \ + /etc/ingress-controller/auth \ + /var/log \ + /var/log/nginx \ + /opt/modsecurity/var/log \ + /opt/modsecurity/var/upload \ + /opt/modsecurity/var/audit \ + ); \ + for dir in "${writeDirs[@]}"; do \ + mkdir -p ${dir}; \ + chown -R www-data.www-data ${dir}; \ + done \ + ' # Create symlinks to redirect nginx logs to stdout and stderr docker log collector # This only works if nginx is started with CMD or ENTRYPOINT -RUN mkdir -p /var/log/nginx \ - && ln -sf /dev/stdout /var/log/nginx/access.log \ +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ && ln -sf /dev/stderr /var/log/nginx/error.log -COPY . 
/ +USER www-data ENTRYPOINT ["/usr/bin/dumb-init"] diff --git a/rootfs/etc/nginx/lua/balancer/chash.lua b/rootfs/etc/nginx/lua/balancer/chash.lua index 9590f1cd6..16dd89def 100644 --- a/rootfs/etc/nginx/lua/balancer/chash.lua +++ b/rootfs/etc/nginx/lua/balancer/chash.lua @@ -1,6 +1,7 @@ local balancer_resty = require("balancer.resty") local resty_chash = require("resty.chash") local util = require("util") +local split = require("util.split") local _M = balancer_resty:new({ factory = resty_chash, name = "chash" }) @@ -15,7 +16,7 @@ end function _M.balance(self) local key = util.lua_ngx_var(self.hash_by) local endpoint_string = self.instance:find(key) - return util.split_pair(endpoint_string, ":") + return split.split_pair(endpoint_string, ":") end return _M diff --git a/rootfs/etc/nginx/lua/balancer/ewma.lua b/rootfs/etc/nginx/lua/balancer/ewma.lua index 7327ead4a..c32709701 100644 --- a/rootfs/etc/nginx/lua/balancer/ewma.lua +++ b/rootfs/etc/nginx/lua/balancer/ewma.lua @@ -7,6 +7,7 @@ local resty_lock = require("resty.lock") local util = require("util") +local split = require("util.split") local DECAY_TIME = 10 -- this value is in seconds local LOCK_KEY = ":ewma_key" @@ -131,10 +132,10 @@ function _M.balance(self) end function _M.after_balance(_) - local response_time = tonumber(util.get_first_value(ngx.var.upstream_response_time)) or 0 - local connect_time = tonumber(util.get_first_value(ngx.var.upstream_connect_time)) or 0 + local response_time = tonumber(split.get_first_value(ngx.var.upstream_response_time)) or 0 + local connect_time = tonumber(split.get_first_value(ngx.var.upstream_connect_time)) or 0 local rtt = connect_time + response_time - local upstream = util.get_first_value(ngx.var.upstream_addr) + local upstream = split.get_first_value(ngx.var.upstream_addr) if util.is_blank(upstream) then return diff --git a/rootfs/etc/nginx/lua/balancer/round_robin.lua b/rootfs/etc/nginx/lua/balancer/round_robin.lua index f8bbccb5a..d8909f2cd 100644 --- a/rootfs/etc/nginx/lua/balancer/round_robin.lua +++ b/rootfs/etc/nginx/lua/balancer/round_robin.lua @@ -1,6 +1,7 @@ local balancer_resty = require("balancer.resty") local resty_roundrobin = require("resty.roundrobin") local util = require("util") +local split = require("util.split") local _M = balancer_resty:new({ factory = resty_roundrobin, name = "round_robin" }) @@ -14,7 +15,7 @@ end function _M.balance(self) local endpoint_string = self.instance:find() - return util.split_pair(endpoint_string, ":") + return split.split_pair(endpoint_string, ":") end return _M diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua index ee1b0b2fb..8af93a986 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -1,6 +1,7 @@ local balancer_resty = require("balancer.resty") local resty_chash = require("resty.chash") local util = require("util") +local split = require("util.split") local ck = require("resty.cookie") local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" }) @@ -74,7 +75,7 @@ end function _M.balance(self) local endpoint_string = sticky_endpoint_string(self) - return util.split_pair(endpoint_string, ":") + return split.split_pair(endpoint_string, ":") end return _M diff --git a/rootfs/etc/nginx/lua/monitor.lua b/rootfs/etc/nginx/lua/monitor.lua new file mode 100644 index 000000000..2ef609cfd --- /dev/null +++ b/rootfs/etc/nginx/lua/monitor.lua @@ -0,0 +1,46 @@ +local socket = ngx.socket.tcp +local cjson = require('cjson') +local defer = 
require('util.defer') +local assert = assert + +local _M = {} + +local function send_data(jsonData) + local s = assert(socket()) + assert(s:connect('unix:/tmp/prometheus-nginx.socket')) + assert(s:send(jsonData)) + assert(s:close()) +end + +function _M.encode_nginx_stats() + return cjson.encode({ + host = ngx.var.host or "-", + status = ngx.var.status or "-", + remoteAddr = ngx.var.remote_addr or "-", + realIpAddr = ngx.var.realip_remote_addr or "-", + remoteUser = ngx.var.remote_user or "-", + bytesSent = tonumber(ngx.var.bytes_sent) or -1, + protocol = ngx.var.server_protocol or "-", + method = ngx.var.request_method or "-", + uri = ngx.var.uri or "-", + requestLength = tonumber(ngx.var.request_length) or -1, + requestTime = tonumber(ngx.var.request_time) or -1, + upstreamName = ngx.var.proxy_upstream_name or "-", + upstreamIP = ngx.var.upstream_addr or "-", + upstreamResponseTime = tonumber(ngx.var.upstream_response_time) or -1, + upstreamStatus = ngx.var.upstream_status or "-", + namespace = ngx.var.namespace or "-", + ingress = ngx.var.ingress_name or "-", + service = ngx.var.service_name or "-", + }) +end + +function _M.call() + local ok, err = defer.to_timer_phase(send_data, _M.encode_nginx_stats()) + if not ok then + ngx.log(ngx.ERR, "failed to defer send_data to timer phase: ", err) + return + end +end + +return _M diff --git a/rootfs/etc/nginx/lua/test/defer_test.lua b/rootfs/etc/nginx/lua/test/defer_test.lua new file mode 100644 index 000000000..6f4f94cc3 --- /dev/null +++ b/rootfs/etc/nginx/lua/test/defer_test.lua @@ -0,0 +1,20 @@ +package.path = "./rootfs/etc/nginx/lua/?.lua;./rootfs/etc/nginx/lua/test/mocks/?.lua;" .. package.path +_G._TEST = true +local defer = require('util.defer') + +local _ngx = { + shared = {}, + log = function(...) end, + get_phase = function() return "timer" end, +} +_G.ngx = _ngx + +describe("Defer", function() + describe("to_timer_phase", function() + it("executes passed callback immediately if called on timer phase", function() + defer.counter = 0 + defer.to_timer_phase(function() defer.counter = defer.counter + 1 end) + assert.equal(defer.counter, 1) + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/test/monitor_test.lua b/rootfs/etc/nginx/lua/test/monitor_test.lua new file mode 100644 index 000000000..752f534bc --- /dev/null +++ b/rootfs/etc/nginx/lua/test/monitor_test.lua @@ -0,0 +1,122 @@ +package.path = "./rootfs/etc/nginx/lua/?.lua;./rootfs/etc/nginx/lua/test/mocks/?.lua;" .. package.path +_G._TEST = true +local cjson = require('cjson') + +local function udp_mock() + return { + setpeername = function(...) return true end, + send = function(payload) return payload end, + close = function(...) return true end + } +end + +local _ngx = { + shared = {}, + log = function(...) 
end, + socket = { + udp = udp_mock + }, + get_phase = function() return "timer" end, + var = {} +} +_G.ngx = _ngx + +describe("Monitor", function() + local monitor = require("monitor") + describe("encode_nginx_stats()", function() + it("successfuly encodes the current stats of nginx to JSON", function() + local nginx_environment = { + host = "testshop.com", + status = "200", + remote_addr = "10.10.10.10", + realip_remote_addr = "5.5.5.5", + remote_user = "admin", + bytes_sent = "150", + server_protocol = "HTTP", + request_method = "GET", + uri = "/admin", + request_length = "300", + request_time = "60", + proxy_upstream_name = "test-upstream", + upstream_addr = "2.2.2.2", + upstream_response_time = "200", + upstream_status = "220", + namespace = "test-app-production", + ingress_name = "web-yml", + service_name = "test-app", + } + ngx.var = nginx_environment + + local encode_nginx_stats = monitor.encode_nginx_stats + local encoded_json_stats = encode_nginx_stats() + local decoded_json_stats = cjson.decode(encoded_json_stats) + + local expected_json_stats = { + host = "testshop.com", + status = "200", + remoteAddr = "10.10.10.10", + realIpAddr = "5.5.5.5", + remoteUser = "admin", + bytesSent = 150.0, + protocol = "HTTP", + method = "GET", + uri = "/admin", + requestLength = 300.0, + requestTime = 60.0, + upstreamName = "test-upstream", + upstreamIP = "2.2.2.2", + upstreamResponseTime = 200, + upstreamStatus = "220", + namespace = "test-app-production", + ingress = "web-yml", + service = "test-app", + } + + assert.are.same(decoded_json_stats,expected_json_stats) + end) + + it("replaces empty numeric keys with -1 and missing string keys with -", function() + local nginx_environment = { + remote_addr = "10.10.10.10", + realip_remote_addr = "5.5.5.5", + remote_user = "francisco", + server_protocol = "HTTP", + request_method = "GET", + uri = "/admin", + request_time = "60", + proxy_upstream_name = "test-upstream", + upstream_addr = "2.2.2.2", + upstream_response_time = "200", + upstream_status = "220", + ingress_name = "web-yml", + } + ngx.var = nginx_environment + + local encode_nginx_stats = monitor.encode_nginx_stats + local encoded_json_stats = encode_nginx_stats() + local decoded_json_stats = cjson.decode(encoded_json_stats) + + local expected_json_stats = { + host = "-", + status = "-", + remoteAddr = "10.10.10.10", + realIpAddr = "5.5.5.5", + remoteUser = "francisco", + bytesSent = -1, + protocol = "HTTP", + method = "GET", + uri = "/admin", + requestLength = -1, + requestTime = 60.0, + upstreamName = "test-upstream", + upstreamIP = "2.2.2.2", + upstreamResponseTime = 200, + upstreamStatus = "220", + namespace = "-", + ingress = "web-yml", + service = "-", + } + assert.are.same(decoded_json_stats,expected_json_stats) + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/util.lua b/rootfs/etc/nginx/lua/util.lua index bfe62b41e..449ad312f 100644 --- a/rootfs/etc/nginx/lua/util.lua +++ b/rootfs/etc/nginx/lua/util.lua @@ -49,17 +49,6 @@ function _M.lua_ngx_var(ngx_var) return ngx.var[var_name] end -function _M.split_pair(pair, seperator) - local i = pair:find(seperator) - if i == nil then - return pair, nil - else - local name = pair:sub(1, i - 1) - local value = pair:sub(i + 1, -1) - return name, value - end -end - -- this implementation is taken from -- https://web.archive.org/web/20131225070434/http://snippets.luacode.org/snippets/Deep_Comparison_of_Two_Values_3 -- and modified for use in this project @@ -88,30 +77,6 @@ function _M.is_blank(str) return str == nil or string_len(str) == 0 end 
--- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example --- CAVEAT: nginx is giving out : instead of , so the docs are wrong --- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr --- 200 : 200 , ngx.var.upstream_status --- 0.00 : 0.00, ngx.var.upstream_response_time -function _M.split_upstream_var(var) - if not var then - return nil, nil - end - local t = {} - for v in var:gmatch("[^%s|,]+") do - if v ~= ":" then - t[#t+1] = v - end - end - return t -end - -function _M.get_first_value(var) - local t = _M.split_upstream_var(var) or {} - if #t == 0 then return nil end - return t[1] -end - -- this implementation is taken from: -- https://github.com/luafun/luafun/blob/master/fun.lua#L33 -- SHA: 04c99f9c393e54a604adde4b25b794f48104e0d0 @@ -130,4 +95,13 @@ local function deepcopy(orig) end _M.deepcopy = deepcopy +local function tablelength(T) + local count = 0 + for _ in pairs(T) do + count = count + 1 + end + return count +end +_M.tablelength = tablelength + return _M diff --git a/rootfs/etc/nginx/lua/util/defer.lua b/rootfs/etc/nginx/lua/util/defer.lua new file mode 100644 index 000000000..3658de4a8 --- /dev/null +++ b/rootfs/etc/nginx/lua/util/defer.lua @@ -0,0 +1,57 @@ +local util = require("util") + +local timer_started = false +local queue = {} +local MAX_QUEUE_SIZE = 10000 + +local _M = {} + +local function flush_queue(premature) + -- TODO Investigate if we should actually still flush the queue when we're + -- shutting down. + if premature then return end + + local current_queue = queue + queue = {} + timer_started = false + + for _,v in ipairs(current_queue) do + v.func(unpack(v.args)) + end +end + +-- `to_timer_phase` will enqueue a function that will be executed in a timer +-- context, at a later point in time. The purpose is that some APIs (such as +-- sockets) are not available during some nginx request phases (such as the +-- logging phase), but are available for use in timers. There are no ordering +-- guarantees for when a function will be executed. +function _M.to_timer_phase(func, ...) + if ngx.get_phase() == "timer" then + func(...) + return true + end + + if #queue >= MAX_QUEUE_SIZE then + ngx.log(ngx.ERR, "deferred timer queue full") + return nil, "deferred timer queue full" + end + + table.insert(queue, { func = func, args = {...} }) + if not timer_started then + local ok, err = ngx.timer.at(0, flush_queue) + if ok then + -- unfortunately this is to deal with tests - when running unit tests, we + -- dont actually run the timer, we call the function inline + if util.tablelength(queue) > 0 then + timer_started = true + end + else + local msg = "failed to create timer: " .. tostring(err) + ngx.log(ngx.ERR, msg) + return nil, msg + end + end + return true +end + +return _M diff --git a/rootfs/etc/nginx/lua/util/split.lua b/rootfs/etc/nginx/lua/util/split.lua new file mode 100644 index 000000000..63282337f --- /dev/null +++ b/rootfs/etc/nginx/lua/util/split.lua @@ -0,0 +1,70 @@ +local _M = {} + +-- splits strings into host and port +local function parse_addr(addr) + local _, _, host, port = addr:find("([^:]+):([^:]+)") + if host and port then + return {host=host, port=port} + else + return nil, "error in parsing upstream address!" 
+ end +end + +function _M.get_first_value(var) + local t = _M.split_upstream_var(var) or {} + if #t == 0 then return nil end + return t[1] +end + +function _M.split_pair(pair, seperator) + local i = pair:find(seperator) + if i == nil then + return pair, nil + else + local name = pair:sub(1, i - 1) + local value = pair:sub(i + 1, -1) + return name, value + end +end + +-- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example +-- CAVEAT: nginx is giving out : instead of , so the docs are wrong +-- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr +-- 200 : 200 , ngx.var.upstream_status +-- 0.00 : 0.00, ngx.var.upstream_response_time +function _M.split_upstream_var(var) + if not var then + return nil, nil + end + local t = {} + for v in var:gmatch("[^%s|,]+") do + if v ~= ":" then + t[#t+1] = v + end + end + return t +end + +-- Splits an NGINX $upstream_addr and returns an array of tables with a `host` and `port` key-value pair. +function _M.split_upstream_addr(addrs_str) + if not addrs_str then + return nil, nil + end + + local addrs = _M.split_upstream_var(addrs_str) + local host_and_ports = {} + + for _, v in ipairs(addrs) do + local a, err = parse_addr(v) + if err then + return nil, err + end + host_and_ports[#host_and_ports+1] = a + end + if #host_and_ports == 0 then + return nil, "no upstream addresses to parse!" + end + return host_and_ports +end + +return _M diff --git a/rootfs/etc/nginx/nginx.conf b/rootfs/etc/nginx/nginx.conf index bb36624ce..6f8e86b90 100644 --- a/rootfs/etc/nginx/nginx.conf +++ b/rootfs/etc/nginx/nginx.conf @@ -1,5 +1,5 @@ # A very simple nginx configuration file that forces nginx to start. -pid /run/nginx.pid; +pid /tmp/nginx.pid; events {} http {} diff --git a/rootfs/etc/nginx/template/nginx.tmpl b/rootfs/etc/nginx/template/nginx.tmpl index 50b3dc231..6ce6eb8d5 100644 --- a/rootfs/etc/nginx/template/nginx.tmpl +++ b/rootfs/etc/nginx/template/nginx.tmpl @@ -7,6 +7,11 @@ {{ $proxyHeaders := .ProxySetHeaders }} {{ $addHeaders := .AddHeaders }} +# Configuration checksum: {{ $all.Cfg.Checksum }} + +# setup custom paths that do not require root access +pid /tmp/nginx.pid; + {{ if $cfg.EnableModsecurity }} load_module /etc/nginx/modules/ngx_http_modsecurity_module.so; {{ end }} @@ -20,7 +25,6 @@ worker_processes {{ $cfg.WorkerProcesses }}; worker_cpu_affinity {{ $cfg.WorkerCpuAffinity }}; {{ end }} -pid /run/nginx.pid; {{ if ne .MaxOpenFiles 0 }} worker_rlimit_nofile {{ .MaxOpenFiles }}; {{ end }} @@ -67,6 +71,13 @@ http { balancer = res end {{ end }} + + ok, res = pcall(require, "monitor") + if not ok then + error("require failed: " .. 
tostring(res)) + else + monitor = res + end } {{ if $all.DynamicConfigurationEnabled }} @@ -97,11 +108,6 @@ http { geoip_proxy_recursive on; {{ end }} - {{ if $cfg.EnableVtsStatus }} - vhost_traffic_status_zone shared:vhost_traffic_status:{{ $cfg.VtsStatusZoneSize }}; - vhost_traffic_status_filter_by_set_key {{ $cfg.VtsDefaultFilterKey }}; - {{ end }} - aio threads; aio_write on; @@ -115,6 +121,10 @@ http { keepalive_timeout {{ $cfg.KeepAlive }}s; keepalive_requests {{ $cfg.KeepAliveRequests }}; + client_body_temp_path /tmp/client-body; + fastcgi_temp_path /tmp/fastcgi-temp; + proxy_temp_path /tmp/proxy-temp; + client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }}; client_header_timeout {{ $cfg.ClientHeaderTimeout }}s; large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }}; @@ -182,6 +192,7 @@ http { # $namespace # $ingress_name # $service_name + # $service_port log_format upstreaminfo {{ if $cfg.LogFormatEscapeJSON }}escape=json {{ end }}'{{ buildLogFormatUpstream $cfg }}'; {{/* map urls that should not appear in access.log */}} @@ -360,7 +371,7 @@ http { {{ range $name, $upstream := $backends }} {{ if eq $upstream.SessionAffinity.AffinityType "cookie" }} upstream sticky-{{ $upstream.Name }} { - sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }} httponly; + sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }}{{if eq (len $upstream.SessionAffinity.CookieSessionAffinity.Locations) 1 }}{{ range $locationName, $locationPaths := $upstream.SessionAffinity.CookieSessionAffinity.Locations }}{{ if eq (len $locationPaths) 1 }} path={{ index $locationPaths 0 }}{{ end }}{{ end }}{{ end }} httponly; {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} keepalive {{ $cfg.UpstreamKeepaliveConnections }}; @@ -529,14 +540,8 @@ http { opentracing off; {{ end }} - {{ if $cfg.EnableVtsStatus }} - vhost_traffic_status_display; - vhost_traffic_status_display_format html; - vhost_traffic_status_display_sum_key {{ $cfg.VtsSumKey }}; - {{ else }} access_log off; stub_status on; - {{ end }} } {{ if $all.DynamicConfigurationEnabled }} @@ -593,7 +598,7 @@ stream { {{ range $i, $tcpServer := .TCPBackends }} upstream tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} { {{ range $j, $endpoint := $tcpServer.Endpoints }} - server {{ $endpoint.Address }}:{{ $endpoint.Port }}; + server {{ $endpoint.Address | formatIP }}:{{ $endpoint.Port }}; {{ end }} } server { @@ -622,7 +627,7 @@ stream { {{ range $i, $udpServer := .UDPBackends }} upstream udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }} { {{ range $j, $endpoint := $udpServer.Endpoints }} - server {{ $endpoint.Address }}:{{ $endpoint.Port }}; + server {{ $endpoint.Address | formatIP }}:{{ $endpoint.Port }}; {{ end }} } @@ -663,6 +668,7 @@ stream { proxy_set_header X-Namespace $namespace; proxy_set_header X-Ingress-Name $ingress_name; proxy_set_header X-Service-Name $service_name; + proxy_set_header X-Service-Port $service_port; rewrite (.*) / break; @@ -833,6 +839,13 @@ stream { {{ end }} location {{ $path }} { + {{ $ing := (getIngressInformation $location.Ingress $location.Path) }} + set $namespace "{{ $ing.Namespace }}"; + set $ingress_name "{{ $ing.Rule }}"; + set $service_name "{{ $ing.Service }}"; + set $service_port "{{ $location.Port }}"; + set 
$location_path "{{ $location.Path }}"; + {{ if not $all.DisableLua }} rewrite_by_lua_block { {{ if $all.DynamicConfigurationEnabled}} @@ -888,6 +901,8 @@ stream { {{ if $all.DynamicConfigurationEnabled}} balancer.log() {{ end }} + + monitor.call() } {{ end }} @@ -908,16 +923,8 @@ stream { port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; - {{ if $all.Cfg.EnableVtsStatus }}{{ if $location.VtsFilterKey }} vhost_traffic_status_filter_by_set_key {{ $location.VtsFilterKey }};{{ end }}{{ end }} - set $proxy_upstream_name "{{ buildUpstreamName $server.Hostname $all.Backends $location $all.DynamicConfigurationEnabled }}"; - {{ $ing := (getIngressInformation $location.Ingress $location.Path) }} - {{/* $ing.Metadata contains the Ingress metadata */}} - set $namespace "{{ $ing.Namespace }}"; - set $ingress_name "{{ $ing.Rule }}"; - set $service_name "{{ $ing.Service }}"; - {{/* redirect to HTTPS can be achieved forcing the redirect or having a SSL Certificate configured for the server */}} {{ if (or $location.Rewrite.ForceSSLRedirect (and (not (empty $server.SSLCert.PemFileName)) $location.Rewrite.SSLRedirect)) }} {{ if not (isLocationInLocationList $location $all.Cfg.NoTLSRedirectLocations) }} @@ -1091,6 +1098,7 @@ stream { proxy_set_header X-Namespace $namespace; proxy_set_header X-Ingress-Name $ingress_name; proxy_set_header X-Service-Name $service_name; + proxy_set_header X-Service-Port $service_port; {{ end }} {{ if not (empty $location.Backend) }} diff --git a/test/data/config.json b/test/data/config.json index b023858aa..36ea6d64f 100644 --- a/test/data/config.json +++ b/test/data/config.json @@ -21,7 +21,6 @@ "bodySize": "1m", "enableDynamicTlsRecords": true, "enableSpdy": false, - "enableVtsStatus": true, "errorLogLevel": "notice", "gzipTypes": "application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component", "hsts": true, @@ -46,7 +45,6 @@ "useGzip": true, "useHttp2": true, "proxyStreamTimeout": "600s", - "vtsStatusZoneSize": "10m", "workerProcesses": 1, "limitConnZoneVariable": "$the_real_ip" }, @@ -117,9 +115,7 @@ "keyFilename": "", "caFilename": "", "pemSha": "" - }, - "vtsDefaultFilterKey": "$uri $server_name" - + } }, { "path": "/", "isDefBackend": true, diff --git a/test/e2e/annotations/affinity.go b/test/e2e/annotations/affinity.go index 30d31031c..83ad03ecf 100644 --- a/test/e2e/annotations/affinity.go +++ b/test/e2e/annotations/affinity.go @@ -149,4 +149,117 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity", func() { Expect(body).Should(ContainSubstring(fmt.Sprintf("request_uri=http://%v:8080/something/", host))) Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("SERVERID=")) }) + + It("should set the path to /something on the generated cookie", func() { + host := "example.com" + + ing, err := f.EnsureIngress(&v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: host, + Namespace: f.IngressController.Namespace, + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + }, + }, + Spec: v1beta1.IngressSpec{ + Rules: []v1beta1.IngressRule{ + { + Host: host, + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + { + 
Path: "/something", + Backend: v1beta1.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }) + + Expect(err).NotTo(HaveOccurred()) + Expect(ing).NotTo(BeNil()) + + err = f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_pass http://sticky-"+f.IngressController.Namespace+"-http-svc-80;") + }) + Expect(err).NotTo(HaveOccurred()) + + resp, _, errs := gorequest.New(). + Get(f.IngressController.HTTPURL+"/something"). + Set("Host", host). + End() + + Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/something")) + }) + + It("should set the path to / on the generated cookie if there's more than one rule referring to the same backend", func() { + host := "example.com" + + ing, err := f.EnsureIngress(&v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: host, + Namespace: f.IngressController.Namespace, + Annotations: map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + }, + }, + Spec: v1beta1.IngressSpec{ + Rules: []v1beta1.IngressRule{ + { + Host: host, + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + { + Path: "/something", + Backend: v1beta1.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + { + Path: "/somewhereelese", + Backend: v1beta1.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }) + + Expect(err).NotTo(HaveOccurred()) + Expect(ing).NotTo(BeNil()) + + err = f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_pass http://sticky-"+f.IngressController.Namespace+"-http-svc-80;") + }) + Expect(err).NotTo(HaveOccurred()) + + resp, _, errs := gorequest.New(). + Get(f.IngressController.HTTPURL+"/something"). + Set("Host", host). 
+ End() + + Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/;")) + }) }) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 6a628d64d..33addec8e 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -33,6 +33,7 @@ import ( _ "k8s.io/ingress-nginx/test/e2e/annotations" _ "k8s.io/ingress-nginx/test/e2e/defaultbackend" _ "k8s.io/ingress-nginx/test/e2e/lua" + _ "k8s.io/ingress-nginx/test/e2e/servicebackend" _ "k8s.io/ingress-nginx/test/e2e/settings" _ "k8s.io/ingress-nginx/test/e2e/ssl" ) diff --git a/test/e2e/lua/dynamic_configuration.go b/test/e2e/lua/dynamic_configuration.go index 17bad12de..370aa5094 100644 --- a/test/e2e/lua/dynamic_configuration.go +++ b/test/e2e/lua/dynamic_configuration.go @@ -36,6 +36,15 @@ import ( "k8s.io/ingress-nginx/test/e2e/framework" ) +const ( + logDynamicConfigSuccess = "Dynamic reconfiguration succeeded" + logDynamicConfigFailure = "Dynamic reconfiguration failed" + logRequireBackendReload = "Configuration changes detected, backend reload required" + logBackendReloadSuccess = "Backend successfully reloaded" + logSkipBackendReload = "Changes handled by the dynamic configuration, skipping backend reload" + logInitialConfigSync = "Initial synchronization of the NGINX configuration" +) + var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { f := framework.NewDefaultFramework("dynamic-configuration") @@ -69,8 +78,8 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { log, err := f.NginxLogs() Expect(err).ToNot(HaveOccurred()) - Expect(log).ToNot(ContainSubstring("could not dynamically reconfigure")) - Expect(log).To(ContainSubstring("first sync of Nginx configuration")) + Expect(log).ToNot(ContainSubstring(logDynamicConfigFailure)) + Expect(log).To(ContainSubstring(logDynamicConfigSuccess)) }) Context("when only backends change", func() { @@ -94,14 +103,14 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { restOfLogs := log[index:] By("POSTing new backends to Lua endpoint") - Expect(restOfLogs).To(ContainSubstring("dynamic reconfiguration succeeded")) - Expect(restOfLogs).ToNot(ContainSubstring("could not dynamically reconfigure")) + Expect(restOfLogs).To(ContainSubstring(logDynamicConfigSuccess)) + Expect(restOfLogs).ToNot(ContainSubstring(logDynamicConfigFailure)) By("skipping Nginx reload") - Expect(restOfLogs).ToNot(ContainSubstring("backend reload required")) - Expect(restOfLogs).ToNot(ContainSubstring("ingress backend successfully reloaded")) - Expect(restOfLogs).To(ContainSubstring("skipping reload")) - Expect(restOfLogs).ToNot(ContainSubstring("first sync of Nginx configuration")) + Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload)) + Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess)) + Expect(restOfLogs).To(ContainSubstring(logSkipBackendReload)) + Expect(restOfLogs).ToNot(ContainSubstring(logInitialConfigSync)) }) It("should be able to update endpoints even when the update POST size(request body) > size(client_body_buffer_size)", func() { @@ -164,14 +173,14 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { restOfLogs := log[index:] By("POSTing new backends to Lua endpoint") - Expect(restOfLogs).To(ContainSubstring("dynamic reconfiguration succeeded")) - Expect(restOfLogs).ToNot(ContainSubstring("could not dynamically reconfigure")) + Expect(restOfLogs).To(ContainSubstring(logDynamicConfigSuccess)) + 
Expect(restOfLogs).ToNot(ContainSubstring(logDynamicConfigFailure)) By("skipping Nginx reload") - Expect(restOfLogs).ToNot(ContainSubstring("backend reload required")) - Expect(restOfLogs).ToNot(ContainSubstring("ingress backend successfully reloaded")) - Expect(restOfLogs).To(ContainSubstring("skipping reload")) - Expect(restOfLogs).ToNot(ContainSubstring("first sync of Nginx configuration")) + Expect(restOfLogs).ToNot(ContainSubstring(logRequireBackendReload)) + Expect(restOfLogs).ToNot(ContainSubstring(logBackendReloadSuccess)) + Expect(restOfLogs).To(ContainSubstring(logSkipBackendReload)) + Expect(restOfLogs).ToNot(ContainSubstring(logInitialConfigSync)) }) }) @@ -208,10 +217,10 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { Expect(log).ToNot(BeEmpty()) By("reloading Nginx") - Expect(log).To(ContainSubstring("ingress backend successfully reloaded")) + Expect(log).To(ContainSubstring(logBackendReloadSuccess)) By("POSTing new backends to Lua endpoint") - Expect(log).To(ContainSubstring("dynamic reconfiguration succeeded")) + Expect(log).To(ContainSubstring(logDynamicConfigSuccess)) By("still be proxying requests through Lua balancer") err = f.WaitForNginxServer("foo.com", diff --git a/test/e2e/servicebackend/service_backend.go b/test/e2e/servicebackend/service_backend.go new file mode 100644 index 000000000..09968975c --- /dev/null +++ b/test/e2e/servicebackend/service_backend.go @@ -0,0 +1,166 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servicebackend + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/test/e2e/framework" + "strings" +) + +var _ = framework.IngressNginxDescribe("Service backend - 503", func() { + f := framework.NewDefaultFramework("service-backend") + + BeforeEach(func() { + }) + + AfterEach(func() { + }) + + It("should return 503 when backend service does not exist", func() { + host := "nonexistent.svc.com" + + bi := buildIngressWithNonexistentService(host, f.IngressController.Namespace, "/") + ing, err := f.EnsureIngress(bi) + Expect(err).NotTo(HaveOccurred()) + Expect(ing).NotTo(BeNil()) + + err = f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "return 503;") + }) + Expect(err).NotTo(HaveOccurred()) + + resp, _, errs := gorequest.New(). + Get(f.IngressController.HTTPURL). + Set("Host", host). 
+ End() + Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(resp.StatusCode).Should(Equal(503)) + }) + + It("should return 503 when all backend service endpoints are unavailable", func() { + host := "unavailable.svc.com" + + bi, bs := buildIngressWithUnavailableServiceEndpoints(host, f.IngressController.Namespace, "/") + + svc, err := f.EnsureService(bs) + Expect(err).NotTo(HaveOccurred()) + Expect(svc).NotTo(BeNil()) + + ing, err := f.EnsureIngress(bi) + Expect(err).NotTo(HaveOccurred()) + Expect(ing).NotTo(BeNil()) + + err = f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "return 503;") + }) + Expect(err).NotTo(HaveOccurred()) + + resp, _, errs := gorequest.New(). + Get(f.IngressController.HTTPURL). + Set("Host", host). + End() + Expect(len(errs)).Should(BeNumerically("==", 0)) + Expect(resp.StatusCode).Should(Equal(503)) + }) + +}) + +func buildIngressWithNonexistentService(host, namespace, path string) *v1beta1.Ingress { + backendService := "nonexistent-svc" + return &v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: host, + Namespace: namespace, + }, + Spec: v1beta1.IngressSpec{ + Rules: []v1beta1.IngressRule{ + { + Host: host, + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + { + Path: path, + Backend: v1beta1.IngressBackend{ + ServiceName: backendService, + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func buildIngressWithUnavailableServiceEndpoints(host, namespace, path string) (*v1beta1.Ingress, *corev1.Service) { + backendService := "unavailable-svc" + return &v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: host, + Namespace: namespace, + }, + Spec: v1beta1.IngressSpec{ + Rules: []v1beta1.IngressRule{ + { + Host: host, + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + { + Path: path, + Backend: v1beta1.IngressBackend{ + ServiceName: backendService, + ServicePort: intstr.FromInt(80), + }, + }, + }, + }, + }, + }, + }, + }, + }, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: backendService, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{ + { + Name: "tcp", + Port: 80, + TargetPort: intstr.FromInt(80), + Protocol: "TCP", + }, + }, + Selector: map[string]string{ + "app": backendService, + }, + }, + } +} diff --git a/test/e2e/settings/configmap_change.go b/test/e2e/settings/configmap_change.go new file mode 100644 index 000000000..ff88687f7 --- /dev/null +++ b/test/e2e/settings/configmap_change.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + "regexp" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Configmap change", func() { + f := framework.NewDefaultFramework("configmap-change") + + BeforeEach(func() { + err := f.NewEchoDeployment() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + }) + + It("should reload after an update in the configuration", func() { + host := "configmap-change" + + ing, err := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil)) + Expect(err).NotTo(HaveOccurred()) + Expect(ing).NotTo(BeNil()) + + wlKey := "whitelist-source-range" + wlValue := "1.1.1.1" + + By("adding a whitelist-source-range") + + err = f.UpdateNginxConfigMapData(wlKey, wlValue) + Expect(err).NotTo(HaveOccurred()) + + checksumRegex := regexp.MustCompile("Configuration checksum:\\s+(\\d+)") + checksum := "" + + err = f.WaitForNginxConfiguration( + func(cfg string) bool { + // before returning, extract the current checksum + match := checksumRegex.FindStringSubmatch(cfg) + if len(match) > 0 { + checksum = match[1] + } + + return strings.Contains(cfg, "geo $the_real_ip $deny_") && + strings.Contains(cfg, "1.1.1.1 0") + }) + Expect(err).NotTo(HaveOccurred()) + Expect(checksum).NotTo(BeEmpty()) + + By("changing error-log-level") + + err = f.UpdateNginxConfigMapData("error-log-level", "debug") + Expect(err).NotTo(HaveOccurred()) + + newChecksum := "" + err = f.WaitForNginxConfiguration( + func(cfg string) bool { + match := checksumRegex.FindStringSubmatch(cfg) + if len(match) > 0 { + newChecksum = match[1] + } + + return strings.ContainsAny(cfg, "error_log /var/log/nginx/error.log debug;") + }) + Expect(err).NotTo(HaveOccurred()) + + Expect(checksum).NotTo(BeEquivalentTo(newChecksum)) + }) +}) diff --git a/test/e2e/wait-for-nginx.sh b/test/e2e/wait-for-nginx.sh index 52b628af3..72147db34 100755 --- a/test/e2e/wait-for-nginx.sh +++ b/test/e2e/wait-for-nginx.sh @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+set -e + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" export NAMESPACE=$1 @@ -22,3 +24,9 @@ echo "deploying NGINX Ingress controller in namespace $NAMESPACE" sed "s@\${NAMESPACE}@${NAMESPACE}@" $DIR/../manifests/ingress-controller/mandatory.yaml | kubectl apply --namespace=$NAMESPACE -f - cat $DIR/../manifests/ingress-controller/service-nodeport.yaml | kubectl apply --namespace=$NAMESPACE -f - + +# wait for the deployment and fail if there is an error before starting the execution of any test +kubectl rollout status \ + --request-timeout=3m \ + --namespace $NAMESPACE \ + deployment nginx-ingress-controller diff --git a/test/manifests/ingress-controller/mandatory.yaml b/test/manifests/ingress-controller/mandatory.yaml index fe17a87db..b349f9f67 100644 --- a/test/manifests/ingress-controller/mandatory.yaml +++ b/test/manifests/ingress-controller/mandatory.yaml @@ -251,6 +251,14 @@ spec: - --publish-service=$(POD_NAMESPACE)/ingress-nginx - --annotations-prefix=nginx.ingress.kubernetes.io - --watch-namespace=${NAMESPACE} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 33 + runAsUser: 33 env: - name: POD_NAME valueFrom: @@ -284,5 +292,3 @@ spec: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - securityContext: - privileged: true diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 000000000..a3866a291 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 000000000..28ce45a3e --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,65 @@ +# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. The most common use case is comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. 
+ + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. + + * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + allowing effective hashing of time.Time + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). + +A quick code example is shown below: + +```go +type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} +} + +v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, +} + +hash, err := hashstructure.Hash(v, nil) +if err != nil { + panic(err) +} + +fmt.Printf("%d", hash) +// Output: +// 2307517237273902113 +``` diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 000000000..ea13a1583 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,358 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// ErrNotStringer is returned when there's an error with hash:"string" +type ErrNotStringer struct { + Field string +} + +// Error implements error for ErrNotStringer +func (ens *ErrNotStringer) Error() string { + return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) +} + +// HashOptions are options that are available for hashing. +type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string + + // ZeroNil is flag determining if nil pointer should be treated equal + // to a zero value of pointed type. By default this is false. + ZeroNil bool +} + +// Hash returns the hash value of an arbitrary value. +// +// If opts is nil, then default options will be used. See HashOptions +// for the default values. The same *HashOptions value cannot be used +// concurrently. None of the values within a *HashOptions struct are +// safe to read/write while hashing is being done. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" or "-" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. 
+// +// * "string" - The field will be hashed as a string, only works when the +// field implements fmt.Stringer +// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + zeronil: opts.ZeroNil, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string + zeronil bool +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + t := reflect.TypeOf(0) + + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. + for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + if w.zeronil { + t = v.Type().Elem() + } + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. + if !v.IsValid() { + v = reflect.Zero(t) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... + switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. This makes it deterministic despite ordering. 
+ var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + + return h, nil + + case reflect.Struct: + parent := v.Interface() + var include Includable + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" || tag == "-" { + // Ignore this field + continue + } + + // if string is set, use the string value + if tag == "string" { + if impl, ok := innerV.Interface().(fmt.Stringer); ok { + innerV = reflect.ValueOf(impl.String()) + } else { + return 0, &ErrNotStringer{ + Field: v.Type().Field(i).Name, + } + } + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, innerV) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(innerV, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. + var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. 
+ e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 000000000..b6289c0be --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. +type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +}
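
For illustration, a minimal sketch of how the hashstructure package vendored above can be used. The Backend struct and its fields here are hypothetical examples, not types from ingress-nginx; the sketch only assumes hashstructure.Hash, its nil-options defaults, and the "set"/"ignore" struct tags documented in hashstructure.go.

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// Backend is a hypothetical value to hash; the tags follow the rules
// documented in hashstructure.go.
type Backend struct {
	Name      string
	Endpoints []string `hash:"set"`    // element order does not affect the hash
	Checksum  string   `hash:"ignore"` // excluded from the hash entirely
}

func main() {
	a := Backend{Name: "http-svc", Endpoints: []string{"10.0.0.1:80", "10.0.0.2:80"}}
	b := Backend{Name: "http-svc", Endpoints: []string{"10.0.0.2:80", "10.0.0.1:80"}, Checksum: "stale"}

	// Passing nil options uses the defaults: FNV-64 hashing and the "hash" tag name.
	ha, err := hashstructure.Hash(a, nil)
	if err != nil {
		panic(err)
	}
	hb, err := hashstructure.Hash(b, nil)
	if err != nil {
		panic(err)
	}

	// Prints "true": Endpoints is treated as a set and Checksum is ignored.
	fmt.Println(ha == hb)
}

Two values that differ only in ignored fields or in the ordering of "set"-tagged slices hash to the same value, which is what makes this kind of structural hash useful for cheap change detection.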