diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2e61ac31d..5f7ff1f91 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -22,10 +22,11 @@ If you're new to the project and want to help, but don't know where to start, we
 ## Contributing A Patch
 
 1. If you haven't already done so, sign a Contributor License Agreement (see details above).
+1. Read the [Ingress development guide](docs/dev/README.md).
 1. Fork the desired repo, develop and test your code changes.
 1. Submit a pull request.
 
-All changes must be code reviewed. Coding conventions and standards are explained in the official [developer docs](https://github.com/kubernetes/kubernetes/tree/8a2c639bfb2087a9a89c02d2dc30fcb9bd0846f6/docs/devel). Expect reviewers to request that you avoid common [go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments) in your PRs.
+All changes must be code reviewed. Coding conventions and standards are explained in the official [developer docs](https://github.com/kubernetes/kubernetes/tree/master/docs/devel). Expect reviewers to request that you avoid common [go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments) in your PRs.
 
 ### Merge Approval
diff --git a/Makefile b/Makefile
index 6f3e1f5e6..12e57452d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-all: fmt lint vet cover
+all: fmt lint vet
 
 BUILDTAGS=
diff --git a/OWNERS b/OWNERS
index 163563012..a394ff383 100644
--- a/OWNERS
+++ b/OWNERS
@@ -3,3 +3,4 @@ assignees:
 - justinsb
 - bprashanth
 - thockin
+- nicksardo
diff --git a/controllers/README.md b/controllers/README.md
index 6df8a3abf..e62cf55b6 100644
--- a/controllers/README.md
+++ b/controllers/README.md
@@ -8,4 +8,4 @@ Configuring a webserver or loadbalancer is harder than it should be. Most webser
 
 ## What is an Ingress Controller?
 
-An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the ApiServer's `/ingresses` endpoint for updates to the [Ingress resource](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/ingress.md). Its job is to satisfy requests for ingress.
+An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the apiserver's `/ingresses` endpoint for updates to the [Ingress resource](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/ingress.md). Its job is to satisfy requests for ingress.
diff --git a/controllers/gce/Dockerfile b/controllers/gce/Dockerfile
index 285b3491b..6ba8be13a 100644
--- a/controllers/gce/Dockerfile
+++ b/controllers/gce/Dockerfile
@@ -12,23 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# TODO: use radial/busyboxplus:curl or alping instead
-FROM ubuntu:14.04
-MAINTAINER Prashanth B
+FROM alpine:3.5
 
-# so apt-get doesn't complain
-ENV DEBIAN_FRONTEND=noninteractive
-RUN sed -i 's/^exit 101/exit 0/' /usr/sbin/policy-rc.d
+RUN apk add --no-cache ca-certificates
 
-# TODO: Move to using haproxy:1.5 image instead. Honestly,
-# that image isn't much smaller and the convenience of having
-# an ubuntu container for dev purposes trumps the tiny amounts
-# of disk and bandwidth we'd save in doing so.
-RUN \
-  apt-get update && \
-  apt-get install -y ca-certificates && \
-  apt-get install -y curl && \
-  rm -rf /var/lib/apt/lists/*
-
-ADD glbc glbc
+COPY glbc glbc
 ENTRYPOINT ["/glbc"]
diff --git a/controllers/gce/Makefile b/controllers/gce/Makefile
index 5300ca93c..927d95fcd 100644
--- a/controllers/gce/Makefile
+++ b/controllers/gce/Makefile
@@ -1,17 +1,17 @@
 all: push
 
 # 0.0 shouldn't clobber any released builds
-TAG = 0.9.0
+TAG = 0.9.2
 PREFIX = gcr.io/google_containers/glbc
 
 server:
 	CGO_ENABLED=0 GOOS=linux godep go build -a -installsuffix cgo -ldflags '-w' -o glbc *.go
 
 container: server
-	docker build -t $(PREFIX):$(TAG) .
+	docker build --pull -t $(PREFIX):$(TAG) .
 
 push: container
-	gcloud docker push $(PREFIX):$(TAG)
+	gcloud docker -- push $(PREFIX):$(TAG)
 
 clean:
 	rm -f glbc
diff --git a/controllers/gce/OWNERS b/controllers/gce/OWNERS
new file mode 100644
index 000000000..5b12a649d
--- /dev/null
+++ b/controllers/gce/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+- nicksardo
+- thockin
+- freehan
+- csbell
+- bprashanth
diff --git a/controllers/gce/README.md b/controllers/gce/README.md
index aa1d6bd8d..1dbf3b0e4 100644
--- a/controllers/gce/README.md
+++ b/controllers/gce/README.md
@@ -18,7 +18,7 @@ __A reminder on Services__: A Kubernetes Service defines a set of pods and a mea
 
 ### L7 Load balancing on Kubernetes
 
-To achive L7 loadbalancing through Kubernetes, we employ a resource called `Ingress`. The Ingress is consumed by this loadbalancer controller, which creates the following GCE resource graph:
+To achieve L7 loadbalancing through Kubernetes, we employ a resource called `Ingress`. The Ingress is consumed by this loadbalancer controller, which creates the following GCE resource graph:
 
 [Global Forwarding Rule](https://cloud.google.com/compute/docs/load-balancing/http/global-forwarding-rules) -> [TargetHttpProxy](https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) -> [Url Map](https://cloud.google.com/compute/docs/load-balancing/http/url-map) -> [Backend Service](https://cloud.google.com/compute/docs/load-balancing/http/backend-service) -> [Instance Group](https://cloud.google.com/compute/docs/instance-groups/)
 
@@ -327,7 +327,7 @@ So simply delete the replication controller:
 
     $ kubectl get rc glbc
     CONTROLLER   CONTAINER(S)           IMAGE(S)                                       SELECTOR                    REPLICAS   AGE
     glbc         default-http-backend   gcr.io/google_containers/defaultbackend:1.0    k8s-app=glbc,version=v0.5   1          2m
-                 l7-lb-controller       gcr.io/google_containers/glbc:0.9.0
+                 l7-lb-controller       gcr.io/google_containers/glbc:0.9.2
 
     $ kubectl delete rc glbc
     replicationcontroller "glbc" deleted
@@ -340,7 +340,7 @@ glbc-6m6b6   1/1   Terminating   0   13m
 
 __The prod way__: If you didn't start the controller with `--delete-all-on-quit`, you can execute a GET on the `/delete-all-and-quit` endpoint. This endpoint is deliberately not exported.
 
 ```
-$ kubectl exec -it glbc-6m6b6 -- curl http://localhost:8081/delete-all-and-quit
+$ kubectl exec -it glbc-6m6b6 -- wget -q -O- http://localhost:8081/delete-all-and-quit
 ..Hangs till quit is done..
 $ kubectl logs glbc-6m6b6 --follow
diff --git a/controllers/gce/backends/backends.go b/controllers/gce/backends/backends.go
index 18686b968..07f0b3fae 100644
--- a/controllers/gce/backends/backends.go
+++ b/controllers/gce/backends/backends.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"net/http"
 	"strconv"
+	"strings"
 	"time"
 
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -32,6 +33,41 @@ import (
 	"k8s.io/ingress/controllers/gce/utils"
 )
 
+// BalancingMode represents the loadbalancing configuration of an individual
+// Backend in a BackendService. This is *effectively* a cluster wide setting
+// since you can't mix modes across Backends pointing to the same IG, and you
+// can't have a single node in more than 1 loadbalanced IG.
+type BalancingMode string
+
+const (
+	// Rate balances incoming requests based on observed RPS.
+	// As of this writing, it's the only balancing mode supported by GCE's
+	// internal LB. This setting doesn't make sense for Kubernetes clusters,
+	// because requests can get proxied between instance groups in different
+	// zones by kube-proxy without GCE even knowing it. Setting equal RPS on
+	// all IGs should achieve roughly equal distribution of requests.
+	Rate BalancingMode = "RATE"
+	// Utilization balances incoming requests based on observed utilization.
+	// This mode is only useful if you want to divert traffic away from IGs
+	// running other compute intensive workloads. Utilization statistics are
+	// aggregated per instance, not per container, and requests can get proxied
+	// between instance groups in different zones by kube-proxy without GCE even
+	// knowing about it.
+	Utilization BalancingMode = "UTILIZATION"
+	// Connections balances incoming requests based on a connection counter.
+	// This setting currently doesn't make sense for Kubernetes clusters,
+	// because we use NodePort Services as HTTP LB backends, so GCE's connection
+	// counters don't accurately represent connections per container.
+	Connections BalancingMode = "CONNECTION"
+)
+
+// maxRPS is the RPS setting for all Backends with BalancingMode RATE. The exact
+// value doesn't matter, as long as it's the same for all Backends. Requests
+// received by GCLB above this RPS are NOT dropped; GCLB continues to distribute
+// them across IGs.
+// TODO: Should this be math.MaxInt64?
+const maxRPS = 1
+
 // Backends implements BackendPool.
 type Backends struct {
 	cloud BackendServices
@@ -116,20 +152,49 @@ func (b *Backends) create(igs []*compute.InstanceGroup, namedPort *compute.Named
 	if err != nil {
 		return nil, err
 	}
-	// Create a new backend
-	backend := &compute.BackendService{
-		Name:     name,
-		Protocol: "HTTP",
-		Backends: getBackendsForIGs(igs),
-		// Api expects one, means little to kubernetes.
-		HealthChecks: []string{hc.SelfLink},
-		Port:         namedPort.Port,
-		PortName:     namedPort.Name,
+	errs := []string{}
+	// We first try to create the backend with balancingMode=RATE. If this
+	// fails, it's most likely because there are existing backends with
+	// balancingMode=UTILIZATION. This failure mode throws a googleapi error
+	// which wraps an HTTP 400 status code. We handle it in the loop below
+	// and come around to retry with the right balancing mode. The goal is to
+	// switch everyone to using RATE.
+	for _, bm := range []BalancingMode{Rate, Utilization} {
+		backends := getBackendsForIGs(igs)
+		for _, b := range backends {
+			switch bm {
+			case Rate:
+				b.MaxRate = maxRPS
+			default:
+				// TODO: Set utilization and connection limits when we accept them
+				// as valid fields.
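+				// With no explicit limits set here, UTILIZATION backends fall
+				// back to GCE's defaults; only the mode itself is set below.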
+			}
+			b.BalancingMode = string(bm)
+		}
+		// Create a new backend
+		backend := &compute.BackendService{
+			Name:         name,
+			Protocol:     "HTTP",
+			Backends:     backends,
+			HealthChecks: []string{hc.SelfLink},
+			Port:         namedPort.Port,
+			PortName:     namedPort.Name,
+		}
+		if err := b.cloud.CreateBackendService(backend); err != nil {
+			// This is probably a failure because we tried to create the backend
+			// with balancingMode=RATE when there are already backends with
+			// balancingMode=UTILIZATION. Just ignore it and retry setting
+			// balancingMode=UTILIZATION (b/35102911).
+			if utils.IsHTTPErrorCode(err, http.StatusBadRequest) {
+				glog.Infof("Error creating backend service with balancing mode %v:%v", bm, err)
+				errs = append(errs, fmt.Sprintf("%v", err))
+				continue
+			}
+			return nil, err
+		}
+		return b.Get(namedPort.Port)
 	}
-	if err := b.cloud.CreateBackendService(backend); err != nil {
-		return nil, err
-	}
-	return b.Get(namedPort.Port)
+	return nil, fmt.Errorf("%v", strings.Join(errs, "\n"))
 }
 
 // Add will get or create a Backend for the given port.
diff --git a/controllers/gce/backends/backends_test.go b/controllers/gce/backends/backends_test.go
index 951cbb1cd..08afd35f6 100644
--- a/controllers/gce/backends/backends_test.go
+++ b/controllers/gce/backends/backends_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package backends
 
 import (
+	"net/http"
 	"testing"
 
 	compute "google.golang.org/api/compute/v1"
@@ -25,10 +26,14 @@ import (
 	"k8s.io/ingress/controllers/gce/storage"
 	"k8s.io/ingress/controllers/gce/utils"
 	"k8s.io/kubernetes/pkg/util/sets"
+
+	"google.golang.org/api/googleapi"
 )
 
 const defaultZone = "zone-a"
 
+var noOpErrFunc = func(op int, be *compute.BackendService) error { return nil }
+
 func newBackendPool(f BackendServices, fakeIGs instances.InstanceGroups, syncWithCloud bool) BackendPool {
 	namer := &utils.Namer{}
 	nodePool := instances.NewNodePool(fakeIGs)
@@ -40,7 +45,7 @@ func newBackendPool(f BackendServices, fakeIGs instances.InstanceGroups, syncWit
 }
 
 func TestBackendPoolAdd(t *testing.T) {
-	f := NewFakeBackendServices()
+	f := NewFakeBackendServices(noOpErrFunc)
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	pool := newBackendPool(f, fakeIGs, false)
 	namer := utils.Namer{}
@@ -110,7 +115,7 @@ func TestBackendPoolSync(t *testing.T) {
 	// Call sync on a backend pool with a list of ports, make sure the pool
 	// creates/deletes required ports.
 	svcNodePorts := []int64{81, 82, 83}
-	f := NewFakeBackendServices()
+	f := NewFakeBackendServices(noOpErrFunc)
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	pool := newBackendPool(f, fakeIGs, true)
 	pool.Add(81)
@@ -174,7 +179,7 @@ func TestBackendPoolSync(t *testing.T) {
 }
 
 func TestBackendPoolShutdown(t *testing.T) {
-	f := NewFakeBackendServices()
+	f := NewFakeBackendServices(noOpErrFunc)
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	pool := newBackendPool(f, fakeIGs, false)
 	namer := utils.Namer{}
@@ -187,7 +192,7 @@ func TestBackendPoolShutdown(t *testing.T) {
 }
 
 func TestBackendInstanceGroupClobbering(t *testing.T) {
-	f := NewFakeBackendServices()
+	f := NewFakeBackendServices(noOpErrFunc)
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	pool := newBackendPool(f, fakeIGs, false)
 	namer := utils.Namer{}
@@ -230,3 +235,40 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
 		t.Fatalf("Expected %v Got %v", expectedGroups, gotGroups)
 	}
 }
+
+func TestBackendCreateBalancingMode(t *testing.T) {
+	f := NewFakeBackendServices(noOpErrFunc)
+
+	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
+	pool := newBackendPool(f, fakeIGs, false)
+	namer := utils.Namer{}
+	nodePort := int64(8080)
+	modes := []BalancingMode{Rate, Utilization}
+
+	// block the creation of Backends with the given balancingMode
+	// and verify that a backend with the other balancingMode is
+	// created
+	for i, bm := range modes {
+		f.errFunc = func(op int, be *compute.BackendService) error {
+			for _, b := range be.Backends {
+				if b.BalancingMode == string(bm) {
+					return &googleapi.Error{Code: http.StatusBadRequest}
+				}
+			}
+			return nil
+		}
+
+		pool.Add(nodePort)
+		be, err := f.GetBackendService(namer.BeName(nodePort))
+		if err != nil {
+			t.Fatalf("%v", err)
+		}
+
+		for _, b := range be.Backends {
+			if b.BalancingMode != string(modes[(i+1)%len(modes)]) {
+				t.Fatalf("Wrong balancing mode, expected %v got %v", modes[(i+1)%len(modes)], b.BalancingMode)
+			}
+		}
+		pool.GC([]int64{})
+	}
+}
diff --git a/controllers/gce/backends/fakes.go b/controllers/gce/backends/fakes.go
index a5eb1d006..bb2b031f0 100644
--- a/controllers/gce/backends/fakes.go
+++ b/controllers/gce/backends/fakes.go
@@ -25,8 +25,9 @@ import (
 )
 
 // NewFakeBackendServices creates a new fake backend services manager.
-func NewFakeBackendServices() *FakeBackendServices {
+func NewFakeBackendServices(ef func(op int, be *compute.BackendService) error) *FakeBackendServices {
 	return &FakeBackendServices{
+		errFunc: ef,
 		backendServices: cache.NewStore(func(obj interface{}) (string, error) {
 			svc := obj.(*compute.BackendService)
 			return svc.Name, nil
@@ -38,6 +39,7 @@ func NewFakeBackendServices() *FakeBackendServices {
 type FakeBackendServices struct {
 	backendServices cache.Store
 	calls           []int
+	errFunc         func(op int, be *compute.BackendService) error
 }
 
 // GetBackendService fakes getting a backend service from the cloud.
@@ -60,6 +62,11 @@ func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendSe
 
 // CreateBackendService fakes backend service creation.
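+// If an errFunc was provided, it runs first so tests can simulate
+// cloud-side failures (e.g. an HTTP 400 for a balancing-mode mismatch).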
 func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) error {
+	if f.errFunc != nil {
+		if err := f.errFunc(utils.Create, be); err != nil {
+			return err
+		}
+	}
 	f.calls = append(f.calls, utils.Create)
 	be.SelfLink = be.Name
 	return f.backendServices.Update(be)
diff --git a/controllers/gce/controller/cluster_manager.go b/controllers/gce/controller/cluster_manager.go
index c16ddb709..993644413 100644
--- a/controllers/gce/controller/cluster_manager.go
+++ b/controllers/gce/controller/cluster_manager.go
@@ -243,7 +243,7 @@ func NewClusterManager(
 	defaultHealthCheckPath string) (*ClusterManager, error) {
 
 	// TODO: Make this more resilient. Currently we create the cloud client
-	// and pass it through to all the pools. This makes unittesting easier.
+	// and pass it through to all the pools. This makes unit testing easier.
 	// However if the cloud client suddenly fails, we should try to re-create it
 	// and continue.
 	var cloud *gce.GCECloud
diff --git a/controllers/gce/controller/controller.go b/controllers/gce/controller/controller.go
index c4d15e2fd..69f6a2533 100644
--- a/controllers/gce/controller/controller.go
+++ b/controllers/gce/controller/controller.go
@@ -46,6 +46,10 @@ var (
 	// L7 controller created without specifying the --cluster-uid flag.
 	DefaultClusterUID = ""
 
+	// DefaultFirewallName is the name to use for firewall rules created
+	// by an L7 controller when the --firewall-rule flag is not used.
+	DefaultFirewallName = ""
+
 	// Frequency to poll on local stores to sync.
 	storeSyncPollPeriod = 5 * time.Second
 )
@@ -423,14 +427,23 @@ func (lbc *LoadBalancerController) ListRuntimeInfo() (lbs []*loadbalancers.L7Run
 			glog.Warningf("Cannot get key for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
 			continue
 		}
-		tls, err := lbc.tlsLoader.load(&ing)
-		if err != nil {
-			glog.Warningf("Cannot get certs for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
-		}
+
+		var tls *loadbalancers.TLSCerts
+		annotations := ingAnnotations(ing.ObjectMeta.Annotations)
+		// Load the TLS cert from the API Spec if it is not specified in the annotation.
+		// TODO: enforce this with validation.
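+		// A cert named by the pre-shared-cert annotation takes precedence:
+		// when it is set, no secret-based TLS certs are loaded at all.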
+		if annotations.useNamedTLS() == "" {
+			tls, err = lbc.tlsLoader.load(&ing)
+			if err != nil {
+				glog.Warningf("Cannot get certs for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
+			}
+		}
+
 		lbs = append(lbs, &loadbalancers.L7RuntimeInfo{
 			Name:         k,
 			TLS:          tls,
+			TLSName:      annotations.useNamedTLS(),
 			AllowHTTP:    annotations.allowHTTP(),
 			StaticIPName: annotations.staticIPName(),
 		})
diff --git a/controllers/gce/controller/controller_test.go b/controllers/gce/controller/controller_test.go
index cc58e94b5..f8d905b44 100644
--- a/controllers/gce/controller/controller_test.go
+++ b/controllers/gce/controller/controller_test.go
@@ -199,7 +199,8 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 }
 
 func TestLbCreateDelete(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	testFirewallName := "quux"
+	cm := NewFakeClusterManager(DefaultClusterUID, testFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap1 := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
@@ -240,6 +241,7 @@ func TestLbCreateDelete(t *testing.T) {
 	unexpected := []int{pm.portMap["foo2svc"], pm.portMap["bar2svc"]}
 	expected := []int{pm.portMap["foo1svc"], pm.portMap["bar1svc"]}
 	firewallPorts := sets.NewString()
+	pm.namer.SetFirewallName(testFirewallName)
 	firewallName := pm.namer.FrName(pm.namer.FrSuffix())
 
 	if firewallRule, err := cm.firewallPool.(*firewalls.FirewallRules).GetFirewall(firewallName); err != nil {
@@ -290,7 +292,7 @@ func TestLbCreateDelete(t *testing.T) {
 }
 
 func TestLbFaultyUpdate(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
@@ -327,7 +329,7 @@ func TestLbFaultyUpdate(t *testing.T) {
 }
 
 func TestLbDefaulting(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	// Make sure the controller plugs in the default values accepted by GCE.
 	ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
@@ -345,7 +347,7 @@ func TestLbDefaulting(t *testing.T) {
 }
 
 func TestLbNoService(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
@@ -389,7 +391,7 @@ func TestLbNoService(t *testing.T) {
 }
 
 func TestLbChangeStaticIP(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
diff --git a/controllers/gce/controller/fakes.go b/controllers/gce/controller/fakes.go
index cfa3ed08f..a4870593c 100644
--- a/controllers/gce/controller/fakes.go
+++ b/controllers/gce/controller/fakes.go
@@ -20,6 +20,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/sets"
 
+	compute "google.golang.org/api/compute/v1"
 	"k8s.io/ingress/controllers/gce/backends"
 	"k8s.io/ingress/controllers/gce/firewalls"
 	"k8s.io/ingress/controllers/gce/healthchecks"
@@ -43,12 +44,12 @@ type fakeClusterManager struct {
 }
 
 // NewFakeClusterManager creates a new fake ClusterManager.
-func NewFakeClusterManager(clusterName string) *fakeClusterManager {
+func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager {
 	fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
-	fakeBackends := backends.NewFakeBackendServices()
+	fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	fakeHCs := healthchecks.NewFakeHealthChecks()
-	namer := utils.NewNamer(clusterName)
+	namer := utils.NewNamer(clusterName, firewallName)
 	nodePool := instances.NewNodePool(fakeIGs)
 	nodePool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}})
diff --git a/controllers/gce/controller/util_test.go b/controllers/gce/controller/util_test.go
index a3bbbe120..38f969c63 100644
--- a/controllers/gce/controller/util_test.go
+++ b/controllers/gce/controller/util_test.go
@@ -32,7 +32,7 @@ import (
 
 var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
 
 func TestZoneListing(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	zoneToNode := map[string][]string{
 		"zone-1": {"n1"},
@@ -57,7 +57,7 @@ func TestZoneListing(t *testing.T) {
 }
 
 func TestInstancesAddedToZones(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	zoneToNode := map[string][]string{
 		"zone-1": {"n1", "n2"},
@@ -92,7 +92,7 @@ func TestInstancesAddedToZones(t *testing.T) {
 }
 
 func TestProbeGetter(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
@@ -110,7 +110,7 @@ func TestProbeGetter(t *testing.T) {
 }
 
 func TestProbeGetterNamedPort(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
@@ -133,7 +133,7 @@ func TestProbeGetterNamedPort(t *testing.T) {
 }
 
 func TestProbeGetterCrossNamespace(t *testing.T) {
-	cm := NewFakeClusterManager(DefaultClusterUID)
+	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
 	lbc := newLoadBalancerController(t, cm, "")
 
 	firstPod := &api.Pod{
diff --git a/controllers/gce/controller/utils.go b/controllers/gce/controller/utils.go
index 617ce5fad..b57020cc3 100644
--- a/controllers/gce/controller/utils.go
+++ b/controllers/gce/controller/utils.go
@@ -52,6 +52,13 @@ const (
 	// responsibility to create/delete it.
 	staticIPNameKey = "kubernetes.io/ingress.global-static-ip-name"
 
+	// preSharedCertKey represents the specific pre-shared SSL
+	// certificate for the Ingress controller to use. The controller *does not*
+	// manage this certificate; it is the user's responsibility to create/delete it.
+	// In GCP, the Ingress controller assigns the SSL certificate with this name
+	// to the target proxies of the Ingress.
+	preSharedCertKey = "ingress.gcp.kubernetes.io/pre-shared-cert"
+
 	// ingressClassKey picks a specific "class" for the Ingress. The controller
 	// only processes Ingresses with this annotation either unset, or set
 	// to either gceIngessClass or the empty string.
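For context, an Ingress that hands TLS off to a pre-created GCE certificate through this new annotation might look like the sketch below (the Ingress, certificate, and service names are hypothetical):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-ingress            # hypothetical name
  annotations:
    # Name of an SslCertificate resource that already exists in GCE; the
    # controller attaches it to the target proxy instead of creating a
    # certificate from a Kubernetes secret.
    ingress.gcp.kubernetes.io/pre-shared-cert: "example-gce-cert"
spec:
  backend:
    serviceName: example-svc       # hypothetical NodePort service
    servicePort: 80
```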
@@ -79,6 +86,16 @@ func (ing ingAnnotations) allowHTTP() bool {
 	return v
 }
 
+// useNamedTLS returns the name of the GCE SSL certificate. Empty by default.
+func (ing ingAnnotations) useNamedTLS() string {
+	val, ok := ing[preSharedCertKey]
+	if !ok {
+		return ""
+	}
+
+	return val
+}
+
 func (ing ingAnnotations) staticIPName() string {
 	val, ok := ing[staticIPNameKey]
 	if !ok {
diff --git a/controllers/gce/healthchecks/healthchecks_test.go b/controllers/gce/healthchecks/healthchecks_test.go
new file mode 100644
index 000000000..9db1edd49
--- /dev/null
+++ b/controllers/gce/healthchecks/healthchecks_test.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package healthchecks
+
+import (
+	"testing"
+
+	"k8s.io/ingress/controllers/gce/utils"
+)
+
+func TestFakeHealthCheckActions(t *testing.T) {
+	namer := &utils.Namer{}
+	healthChecks := NewHealthChecker(NewFakeHealthChecks(), "/", namer)
+	healthChecks.Init(&FakeHealthCheckGetter{DefaultHealthCheck: nil})
+
+	err := healthChecks.Add(80)
+	if err != nil {
+		t.Fatalf("unexpected error")
+	}
+
+	_, err1 := healthChecks.Get(8080)
+	if err1 == nil {
+		t.Errorf("expected error")
+	}
+
+	hc, err2 := healthChecks.Get(80)
+	if err2 != nil {
+		t.Errorf("unexpected error")
+	} else {
+		if hc == nil {
+			t.Errorf("expected a *compute.HttpHealthCheck")
+		}
+	}
+
+	err = healthChecks.Delete(8080)
+	if err == nil {
+		t.Errorf("expected error")
+	}
+
+	err = healthChecks.Delete(80)
+	if err != nil {
+		t.Errorf("unexpected error")
+	}
+
+	_, err3 := healthChecks.Get(80)
+	if err3 == nil {
+		t.Errorf("expected error")
+	}
+}
diff --git a/controllers/gce/loadbalancers/loadbalancers.go b/controllers/gce/loadbalancers/loadbalancers.go
index 948718f7f..9688a4cc0 100644
--- a/controllers/gce/loadbalancers/loadbalancers.go
+++ b/controllers/gce/loadbalancers/loadbalancers.go
@@ -246,6 +246,8 @@ type L7RuntimeInfo struct {
 	IP string
 	// TLS are the tls certs to use in termination.
 	TLS *TLSCerts
+	// TLSName is the name of the tls cert to use.
+	TLSName string
 	// AllowHTTP will not setup :80, if TLS is nil and AllowHTTP is set,
 	// no loadbalancer is created.
 	AllowHTTP bool
@@ -350,6 +352,24 @@ func (l *L7) deleteOldSSLCert() (err error) {
 }
 
 func (l *L7) checkSSLCert() (err error) {
+	certName := l.runtimeInfo.TLSName
+
+	// Use the named GCE cert when it is specified by the annotation.
+	if certName != "" {
+		// Ask GCE for the cert, checking for problems and existence.
+		cert, err := l.cloud.GetSslCertificate(certName)
+		if err != nil {
+			return err
+		}
+		if cert == nil {
+			return fmt.Errorf("Cannot find existing sslCertificate %v for %v", certName, l.Name)
+		}
+
+		glog.Infof("Using existing sslCertificate %v for %v", certName, l.Name)
+		l.sslCert = cert
+		return nil
+	}
+
 	// TODO: Currently, GCE only supports a single certificate per static IP
 	// so we don't need to bother with disambiguation. Naming the cert after
 	// the loadbalancer is a simplification.
@@ -363,10 +383,13 @@
 	// TODO: Clean this code up into a ring buffer.
 	primaryCertName := l.namer.Truncate(fmt.Sprintf("%v-%v", sslCertPrefix, l.Name))
 	secondaryCertName := l.namer.Truncate(fmt.Sprintf("%v-%d-%v", sslCertPrefix, 1, l.Name))
-	certName := primaryCertName
+	certName = primaryCertName
 	if l.sslCert != nil {
 		certName = l.sslCert.Name
 	}
+
+	// Skip error checking: erroring out would just cause a retry loop, when
+	// we should instead create/update the cert if the lookup fails or the
+	// cert does not exist.
 	cert, _ := l.cloud.GetSslCertificate(certName)
 
 	// PrivateKey is write only, so compare certs alone. We're assuming that
@@ -383,7 +406,7 @@
 		}
 	}
-	glog.Infof("Creating new sslCertificates %v for %v", l.Name, certName)
+	glog.Infof("Creating new sslCertificates %v for %v", certName, l.Name)
 	cert, err = l.cloud.CreateSslCertificate(&compute.SslCertificate{
 		Name:        certName,
 		Certificate: ingCert,
@@ -577,13 +600,14 @@ func (l *L7) edgeHop() error {
 			return err
 		}
 	}
-	// Defer promoting an emphemral to a static IP till it's really needed.
-	if l.runtimeInfo.AllowHTTP && l.runtimeInfo.TLS != nil {
+	// Defer promoting an ephemeral to a static IP until it's really needed.
+	if l.runtimeInfo.AllowHTTP && (l.runtimeInfo.TLS != nil || l.runtimeInfo.TLSName != "") {
+		glog.V(3).Infof("checking static ip for %v", l.Name)
 		if err := l.checkStaticIP(); err != nil {
 			return err
 		}
 	}
-	if l.runtimeInfo.TLS != nil {
+	if l.runtimeInfo.TLS != nil || l.runtimeInfo.TLSName != "" {
 		glog.V(3).Infof("validating https for %v", l.Name)
 		if err := l.edgeHopHttps(); err != nil {
 			return err
@@ -843,7 +867,8 @@ func (l *L7) Cleanup() error {
 		}
 		l.tps = nil
 	}
-	if l.sslCert != nil {
+	// Delete the SSL cert if it is from a secret, not referencing a pre-created GCE cert.
+	if l.sslCert != nil && l.runtimeInfo.TLSName == "" {
 		glog.Infof("Deleting sslcert %v", l.sslCert.Name)
 		if err := l.cloud.DeleteSslCertificate(l.sslCert.Name); err != nil {
 			if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
@@ -933,6 +958,9 @@ func GetLBAnnotations(l7 *L7, existing map[string]string, backendPool backends.B
 	if l7.ip != nil {
 		existing[fmt.Sprintf("%v/static-ip", utils.K8sAnnotationPrefix)] = l7.ip.Name
 	}
+	if l7.sslCert != nil {
+		existing[fmt.Sprintf("%v/ssl-cert", utils.K8sAnnotationPrefix)] = l7.sslCert.Name
+	}
 	// TODO: We really want to know *when* a backend flipped states.
 	existing[fmt.Sprintf("%v/backends", utils.K8sAnnotationPrefix)] = jsonBackendState
 	return existing
diff --git a/controllers/gce/loadbalancers/loadbalancers_test.go b/controllers/gce/loadbalancers/loadbalancers_test.go
index 4d6fe133b..6ed940f14 100644
--- a/controllers/gce/loadbalancers/loadbalancers_test.go
+++ b/controllers/gce/loadbalancers/loadbalancers_test.go
@@ -34,7 +34,7 @@ const (
 )
 
 func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T) LoadBalancerPool {
-	fakeBackends := backends.NewFakeBackendServices()
+	fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	fakeHCs := healthchecks.NewFakeHealthChecks()
 	namer := &utils.Namer{}
@@ -103,6 +103,40 @@ func TestCreateHTTPSLoadBalancer(t *testing.T) {
 	}
 }
 
+func TestCreateHTTPSLoadBalancerAnnotationCert(t *testing.T) {
+	// This should NOT create the forwarding rule and target proxy
+	// associated with the HTTP branch of this loadbalancer.
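+	// Instead, only the HTTPS target proxy and forwarding rule should be
+	// created, using the pre-existing GCE cert named by TLSName.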
+	tlsName := "external-cert-name"
+	lbInfo := &L7RuntimeInfo{
+		Name:      "test",
+		AllowHTTP: false,
+		TLSName:   tlsName,
+	}
+	f := NewFakeLoadBalancers(lbInfo.Name)
+	f.CreateSslCertificate(&compute.SslCertificate{
+		Name: tlsName,
+	})
+	pool := newFakeLoadBalancerPool(f, t)
+	pool.Sync([]*L7RuntimeInfo{lbInfo})
+	l7, err := pool.Get(lbInfo.Name)
+	if err != nil || l7 == nil {
+		t.Fatalf("Expected l7 not created")
+	}
+	um, err := f.GetUrlMap(f.umName())
+	if err != nil ||
+		um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
+		t.Fatalf("%v", err)
+	}
+	tps, err := f.GetTargetHttpsProxy(f.tpName(true))
+	if err != nil || tps.UrlMap != um.SelfLink {
+		t.Fatalf("%v", err)
+	}
+	fws, err := f.GetGlobalForwardingRule(f.fwName(true))
+	if err != nil || fws.Target != tps.SelfLink {
+		t.Fatalf("%v", err)
+	}
+}
+
 func TestCreateBothLoadBalancers(t *testing.T) {
 	// This should create 2 forwarding rules and target proxies
 	// but they should use the same urlmap, and have the same
@@ -236,7 +270,8 @@ func TestUpdateUrlMapNoChanges(t *testing.T) {
 
 func TestNameParsing(t *testing.T) {
 	clusterName := "123"
-	namer := utils.NewNamer(clusterName)
+	firewallName := clusterName
+	namer := utils.NewNamer(clusterName, firewallName)
 	fullName := namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, namer.LBName("testlb")))
 	annotationsMap := map[string]string{
 		fmt.Sprintf("%v/forwarding-rule", utils.K8sAnnotationPrefix): fullName,
@@ -308,7 +343,7 @@ func TestClusterNameChange(t *testing.T) {
 }
 
 func TestInvalidClusterNameChange(t *testing.T) {
-	namer := utils.NewNamer("test--123")
+	namer := utils.NewNamer("test--123", "test--123")
 	if got := namer.GetClusterName(); got != "123" {
 		t.Fatalf("Expected name 123, got %v", got)
 	}
diff --git a/controllers/gce/main.go b/controllers/gce/main.go
index 2cc35751c..b8afbf9ab 100644
--- a/controllers/gce/main.go
+++ b/controllers/gce/main.go
@@ -39,6 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/wait"
 
 	"github.com/golang/glog"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
 // Entrypoint of GLBC. Example invocation:
@@ -61,7 +62,7 @@ const (
 	alphaNumericChar = "0"
 
 	// Current docker image version. Only used in debug logging.
-	imageVersion = "glbc:0.9.0"
+	imageVersion = "glbc:0.9.2"
 
 	// Key used to persist UIDs to configmaps.
 	uidConfigMapName = "ingress-uid"
@@ -69,7 +70,7 @@ const (
 
 var (
 	flags = flag.NewFlagSet(
-		`gclb: gclb --runngin-in-cluster=false --default-backend-node-port=123`,
+		`glbc: glbc --running-in-cluster=false`,
 		flag.ExitOnError)
 
 	clusterName = flags.String("cluster-uid", controller.DefaultClusterUID,
@@ -132,6 +133,7 @@ func registerHandlers(lbc *controller.LoadBalancerController) {
 		w.WriteHeader(200)
 		w.Write([]byte("ok"))
 	})
+	http.Handle("/metrics", promhttp.Handler())
 	http.HandleFunc("/delete-all-and-quit", func(w http.ResponseWriter, r *http.Request) {
 		// TODO: Retry failures during shutdown.
 		lbc.Stop(true)
@@ -213,7 +215,7 @@ func main() {
 
 	if *inCluster || *useRealCloud {
 		// Create cluster manager
-		namer, err := newNamer(kubeClient, *clusterName)
+		namer, err := newNamer(kubeClient, *clusterName, controller.DefaultFirewallName)
 		if err != nil {
 			glog.Fatalf("%v", err)
 		}
@@ -223,7 +225,7 @@ func main() {
 		}
 	} else {
 		// Create fake cluster manager
-		clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager
+		clusterManager = controller.NewFakeClusterManager(*clusterName, controller.DefaultFirewallName).ClusterManager
 	}
 
 	// Start loadbalancer controller
@@ -245,32 +247,100 @@ func main() {
 	}
 }
 
-func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, error) {
+func newNamer(kubeClient client.Interface, clusterName string, fwName string) (*utils.Namer, error) {
 	name, err := getClusterUID(kubeClient, clusterName)
 	if err != nil {
 		return nil, err
 	}
+	fw_name, err := getFirewallName(kubeClient, fwName, name)
+	if err != nil {
+		return nil, err
+	}
 
-	namer := utils.NewNamer(name)
-	vault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	namer := utils.NewNamer(name, fw_name)
+	uidVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
 
 	// Start a goroutine to poll the cluster UID config map
 	// We don't watch because we know exactly which configmap we want and this
 	// controller already watches 5 other resources, so it isn't worth the cost
 	// of another connection and complexity.
 	go wait.Forever(func() {
-		uid, found, err := vault.Get()
-		existing := namer.GetClusterName()
-		if found && uid != existing {
-			glog.Infof("Cluster uid changed from %v -> %v", existing, uid)
-			namer.SetClusterName(uid)
-		} else if err != nil {
-			glog.Errorf("Failed to reconcile cluster uid %v, currently set to %v", err, existing)
+		for _, key := range [...]string{storage.UidDataKey, storage.ProviderDataKey} {
+			val, found, err := uidVault.Get(key)
+			if err != nil {
+				glog.Errorf("Can't read uidConfigMap %v", uidConfigMapName)
+			} else if !found {
+				errmsg := fmt.Sprintf("Can't read %v from uidConfigMap %v", key, uidConfigMapName)
+				if key == storage.UidDataKey {
+					glog.Errorf(errmsg)
+				} else {
+					glog.V(4).Infof(errmsg)
+				}
+			} else {
+
+				switch key {
+				case storage.UidDataKey:
+					if uid := namer.GetClusterName(); uid != val {
+						glog.Infof("Cluster uid changed from %v -> %v", uid, val)
+						namer.SetClusterName(val)
+					}
+				case storage.ProviderDataKey:
+					if fw_name := namer.GetFirewallName(); fw_name != val {
+						glog.Infof("Cluster firewall name changed from %v -> %v", fw_name, val)
+						namer.SetFirewallName(val)
+					}
+				}
+			}
+		}
 	}, 5*time.Second)
 
 	return namer, nil
 }
 
+// useDefaultOrLookupVault returns either a 'default_name' or, if unset, obtains a name from a ConfigMap.
+// The returned value follows this priority:
+// If the provided 'default_name' is not empty, that name is used.
+//   This is effectively a client override via a command line flag.
+// else, check cfgVault with 'cm_key' as a key and if found, use the associated value
+// else, return an empty 'name' and pass along an error iff the configmap lookup is erroneous.
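+// For example, a non-empty --cluster-uid flag always wins over a 'uid' value
+// previously stored in the ingress-uid ConfigMap.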
+func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_name string) (string, error) {
+	if default_name != "" {
+		glog.Infof("Using user provided %v %v", cm_key, default_name)
+		// Don't save the uid in the vault, so users can rollback through
+		// setting the accompanying flag to ""
+		return default_name, nil
+	}
+	val, found, err := cfgVault.Get(cm_key)
+	if err != nil {
+		// This can fail because of:
+		// 1. No such config map - found=false, err=nil
+		// 2. No such key in config map - found=false, err=nil
+		// 3. Apiserver flake - found=false, err!=nil
+		// It is not safe to proceed in 3.
+		return "", fmt.Errorf("Failed to retrieve %v: %v, returning empty name", cm_key, err)
+	} else if !found {
+		// Not found but safe to proceed.
+		return "", nil
+	}
+	glog.Infof("Using %v = %q saved in ConfigMap", cm_key, val)
+	return val, nil
+}
+
+// getFirewallName returns the firewall rule name to use for this cluster. For
+// backwards compatibility, the firewall name will default to the cluster UID.
+// Use useDefaultOrLookupVault to obtain a stored or overridden value for the firewall name.
+// else, use the cluster UID as a backup (this retains backwards compatibility).
+func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (string, error) {
+	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
+		return "", err
+	} else if fw_name != "" {
+		return fw_name, cfgVault.Put(storage.ProviderDataKey, fw_name)
+	} else {
+		glog.Infof("Using cluster UID %v as firewall name", cluster_uid)
+		return cluster_uid, cfgVault.Put(storage.ProviderDataKey, cluster_uid)
+	}
+}
+
 // getClusterUID returns the cluster UID. Rules for UID generation:
 // If the user specifies a --cluster-uid param it overwrites everything
 // else, check UID config map for a previously recorded uid
@@ -279,26 +349,12 @@ func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, er
 // else, allocate a new uid
 func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
-	if name != "" {
-		glog.Infof("Using user provided cluster uid %v", name)
-		// Don't save the uid in the vault, so users can rollback through
-		// --cluster-uid=""
+	if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
+		return "", err
+	} else if name != "" {
 		return name, nil
 	}
-
-	existingUID, found, err := cfgVault.Get()
-	if found {
-		glog.Infof("Using saved cluster uid %q", existingUID)
-		return existingUID, nil
-	} else if err != nil {
-		// This can fail because of:
-		// 1. No such config map - found=false, err=nil
-		// 2. No such key in config map - found=false, err=nil
-		// 3. Apiserver flake - found=false, err!=nil
-		// It is not safe to proceed in 3.
-		return "", fmt.Errorf("Failed to retrieve current uid: %v, using %q as name", err, name)
-	}
-
 	// Check if the cluster has an Ingress with ip
 	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
 	if err != nil {
@@ -309,10 +365,10 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 		if len(ing.Status.LoadBalancer.Ingress) != 0 {
 			c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
 			if c.ClusterName != "" {
-				return c.ClusterName, cfgVault.Put(c.ClusterName)
+				return c.ClusterName, cfgVault.Put(storage.UidDataKey, c.ClusterName)
 			}
 			glog.Infof("Found a working Ingress, assuming uid is empty string")
-			return "", cfgVault.Put("")
+			return "", cfgVault.Put(storage.UidDataKey, "")
 		}
 	}
 
@@ -327,7 +383,7 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
 		return "", err
 	}
 	uid := fmt.Sprintf("%x", b)
-	return uid, cfgVault.Put(uid)
+	return uid, cfgVault.Put(storage.UidDataKey, uid)
 }
 
 // getNodePort waits for the Service, and returns it's first node port.
diff --git a/controllers/gce/rc.yaml b/controllers/gce/rc.yaml
index 3023d0207..ec946831b 100644
--- a/controllers/gce/rc.yaml
+++ b/controllers/gce/rc.yaml
@@ -24,18 +24,18 @@ metadata:
   name: l7-lb-controller
   labels:
     k8s-app: glbc
-    version: v0.9.0
+    version: v0.9.1
spec:
 # There should never be more than 1 controller alive simultaneously.
 replicas: 1
 selector:
   k8s-app: glbc
-   version: v0.9.0
+   version: v0.9.1
 template:
   metadata:
     labels:
       k8s-app: glbc
-      version: v0.9.0
+      version: v0.9.1
       name: glbc
   spec:
     terminationGracePeriodSeconds: 600
@@ -61,7 +61,7 @@ spec:
       requests:
         cpu: 10m
         memory: 20Mi
-    - image: gcr.io/google_containers/glbc:0.9.0
+    - image: gcr.io/google_containers/glbc:0.9.2
      livenessProbe:
        httpGet:
          path: /healthz
diff --git a/controllers/gce/storage/configmaps.go b/controllers/gce/storage/configmaps.go
index cfed347fc..6af08b65d 100644
--- a/controllers/gce/storage/configmaps.go
+++ b/controllers/gce/storage/configmaps.go
@@ -19,6 +19,7 @@ package storage
 import (
 	"fmt"
 	"strings"
+	"sync"
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
@@ -27,73 +28,86 @@ import (
 	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 )
 
-// UIDVault stores UIDs.
-type UIDVault interface {
-	Get() (string, bool, error)
-	Put(string) error
-	Delete() error
-}
-
-// uidDataKey is the key used in config maps to store the UID.
-const uidDataKey = "uid"
+const (
+	// UidDataKey is the key used in config maps to store the UID.
+	UidDataKey = "uid"
+	// ProviderDataKey is the key used in config maps to store the Provider
+	// UID which we use to ensure unique firewalls.
+	ProviderDataKey = "provider-uid"
+)
 
 // ConfigMapVault stores cluster UIDs in config maps.
 // It's a layer on top of ConfigMapStore that just implements the utils.uidVault
 // interface.
 type ConfigMapVault struct {
+	storeLock      sync.Mutex
 	ConfigMapStore cache.Store
 	namespace      string
 	name           string
 }
 
-// Get retrieves the cluster UID from the cluster config map.
+// Get retrieves the value associated with the provided 'key' from the cluster config map.
 // If this method returns an error, it's guaranteed to be apiserver flake.
 // If the error is a not found error it sets the boolean to false and
 // returns an error of nil instead.
-func (c *ConfigMapVault) Get() (string, bool, error) {
-	key := fmt.Sprintf("%v/%v", c.namespace, c.name)
-	item, found, err := c.ConfigMapStore.GetByKey(key)
+func (c *ConfigMapVault) Get(key string) (string, bool, error) {
+	keyStore := fmt.Sprintf("%v/%v", c.namespace, c.name)
+	item, found, err := c.ConfigMapStore.GetByKey(keyStore)
 	if err != nil || !found {
 		return "", false, err
 	}
-	cfg := item.(*api.ConfigMap)
-	if k, ok := cfg.Data[uidDataKey]; ok {
+	data := item.(*api.ConfigMap).Data
+	c.storeLock.Lock()
+	defer c.storeLock.Unlock()
+	if k, ok := data[key]; ok {
 		return k, true, nil
 	}
-	return "", false, fmt.Errorf("Found config map %v but it doesn't contain uid key: %+v", key, cfg.Data)
+	glog.Infof("Found config map %v but it doesn't contain key %v: %+v", keyStore, key, data)
+	return "", false, nil
 }
 
-// Put stores the given UID in the cluster config map.
-func (c *ConfigMapVault) Put(uid string) error {
+// Put inserts a key/value pair in the cluster config map.
+// If the key already exists, its value is overwritten.
+func (c *ConfigMapVault) Put(key, val string) error {
+	c.storeLock.Lock()
+	defer c.storeLock.Unlock()
 	apiObj := &api.ConfigMap{
 		ObjectMeta: api.ObjectMeta{
 			Name:      c.name,
 			Namespace: c.namespace,
 		},
-		Data: map[string]string{uidDataKey: uid},
 	}
 	cfgMapKey := fmt.Sprintf("%v/%v", c.namespace, c.name)
 
 	item, exists, err := c.ConfigMapStore.GetByKey(cfgMapKey)
 	if err == nil && exists {
 		data := item.(*api.ConfigMap).Data
-		if k, ok := data[uidDataKey]; ok && k == uid {
+		existingVal, ok := data[key]
+		if ok && existingVal == val {
+			// duplicate, no need to update.
 			return nil
-		} else if ok {
-			glog.Infof("Configmap %v has key %v but wrong value %v, updating", cfgMapKey, k, uid)
 		}
-
+		data[key] = val
+		apiObj.Data = data
+		if existingVal != val {
+			glog.Infof("Configmap %v has key %v but wrong value %v, updating to %v", cfgMapKey, key, existingVal, val)
+		} else {
+			glog.Infof("Configmap %v will be updated with %v = %v", cfgMapKey, key, val)
+		}
 		if err := c.ConfigMapStore.Update(apiObj); err != nil {
 			return fmt.Errorf("Failed to update %v: %v", cfgMapKey, err)
 		}
-	} else if err := c.ConfigMapStore.Add(apiObj); err != nil {
-		return fmt.Errorf("Failed to add %v: %v", cfgMapKey, err)
+	} else {
+		apiObj.Data = map[string]string{key: val}
+		if err := c.ConfigMapStore.Add(apiObj); err != nil {
+			return fmt.Errorf("Failed to add %v: %v", cfgMapKey, err)
+		}
 	}
-	glog.Infof("Successfully stored uid %q in config map %v", uid, cfgMapKey)
+	glog.Infof("Successfully stored key %v = %v in config map %v", key, val, cfgMapKey)
 	return nil
 }
 
-// Delete deletes the cluster UID storing config map.
+// Delete deletes the config map backing this vault.
 func (c *ConfigMapVault) Delete() error {
 	cfgMapKey := fmt.Sprintf("%v/%v", c.namespace, c.name)
 	item, _, err := c.ConfigMapStore.GetByKey(cfgMapKey)
@@ -108,13 +122,19 @@ func (c *ConfigMapVault) Delete() error {
 // This client is essentially meant to abstract out the details of
 // configmaps and the API, and just store/retrieve a single value, the cluster uid.
 func NewConfigMapVault(c client.Interface, uidNs, uidConfigMapName string) *ConfigMapVault {
-	return &ConfigMapVault{NewConfigMapStore(c), uidNs, uidConfigMapName}
+	return &ConfigMapVault{
+		ConfigMapStore: NewConfigMapStore(c),
+		namespace:      uidNs,
+		name:           uidConfigMapName}
 }
 
 // NewFakeConfigMapVault is an implementation of the ConfigMapStore that doesn't
 // persist configmaps. Only used in testing.
 func NewFakeConfigMapVault(ns, name string) *ConfigMapVault {
-	return &ConfigMapVault{cache.NewStore(cache.MetaNamespaceKeyFunc), ns, name}
+	return &ConfigMapVault{
+		ConfigMapStore: cache.NewStore(cache.MetaNamespaceKeyFunc),
+		namespace:      ns,
+		name:           name}
 }
 
 // ConfigMapStore wraps the store interface. Implementations usually persist
diff --git a/controllers/gce/storage/configmaps_test.go b/controllers/gce/storage/configmaps_test.go
index 3b8404b89..8d25d6671 100644
--- a/controllers/gce/storage/configmaps_test.go
+++ b/controllers/gce/storage/configmaps_test.go
@@ -24,31 +24,51 @@ import (
 
 func TestConfigMapUID(t *testing.T) {
 	vault := NewFakeConfigMapVault(api.NamespaceSystem, "ingress-uid")
-	uid := ""
-	k, exists, err := vault.Get()
+	// Get value from an empty vault.
+	val, exists, err := vault.Get(UidDataKey)
 	if exists {
-		t.Errorf("Got a key from an empyt vault")
+		t.Errorf("Got value from an empty vault")
 	}
-	vault.Put(uid)
-	k, exists, err = vault.Get()
+
+	// Store empty value for UidDataKey.
+	uid := ""
+	vault.Put(UidDataKey, uid)
+	val, exists, err = vault.Get(UidDataKey)
 	if !exists || err != nil {
-		t.Errorf("Failed to retrieve value from vault")
+		t.Errorf("Failed to retrieve value from vault: %v", err)
 	}
-	if k != "" {
+	if val != "" {
 		t.Errorf("Failed to store empty string as a key in the vault")
 	}
-	vault.Put("newuid")
-	k, exists, err = vault.Get()
+
+	// Store actual value in key.
+	storedVal := "newuid"
+	vault.Put(UidDataKey, storedVal)
+	val, exists, err = vault.Get(UidDataKey)
 	if !exists || err != nil {
 		t.Errorf("Failed to retrieve value from vault")
+	} else if val != storedVal {
+		t.Errorf("Failed to store value %q in the vault", storedVal)
 	}
-	if k != "newuid" {
-		t.Errorf("Failed to modify uid")
+
+	// Store a second value, which has the effect of updating the store
+	// rather than adding.
+	secondVal := "bar"
+	vault.Put("foo", secondVal)
+	val, exists, err = vault.Get("foo")
+	if !exists || err != nil || val != secondVal {
+		t.Errorf("Failed to retrieve second value from vault")
 	}
+	val, exists, err = vault.Get(UidDataKey)
+	if !exists || err != nil || val != storedVal {
+		t.Errorf("Failed to retrieve first value from vault")
+	}
+
+	// Delete value.
 	if err := vault.Delete(); err != nil {
 		t.Errorf("Failed to delete uid %v", err)
 	}
-	if uid, exists, _ := vault.Get(); exists {
-		t.Errorf("Found uid %v, expected none", uid)
+	if _, exists, _ := vault.Get(UidDataKey); exists {
+		t.Errorf("Found uid but expected none after deletion")
 	}
 }
diff --git a/controllers/gce/utils/utils.go b/controllers/gce/utils/utils.go
index 33525ffa3..9d5dbfad1 100644
--- a/controllers/gce/utils/utils.go
+++ b/controllers/gce/utils/utils.go
@@ -92,14 +92,16 @@ const (
 
 // Namer handles centralized naming for the cluster.
 type Namer struct {
-	clusterName string
-	nameLock    sync.Mutex
+	clusterName  string
+	firewallName string
+	nameLock     sync.Mutex
 }
 
-// NewNamer creates a new namer.
-func NewNamer(clusterName string) *Namer {
+// NewNamer creates a new namer with a Cluster and Firewall name.
+func NewNamer(clusterName, firewallName string) *Namer {
 	namer := &Namer{}
 	namer.SetClusterName(clusterName)
+	namer.SetFirewallName(firewallName)
 	return namer
 }
 
@@ -123,6 +125,16 @@ func (n *Namer) SetClusterName(name string) {
 	n.clusterName = name
 }
 
+// SetFirewallName sets the firewall name of this cluster.
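+// An empty name is allowed; GetFirewallName then falls back to the cluster
+// name, preserving the old behavior where firewallName == clusterName.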
+func (n *Namer) SetFirewallName(firewall_name string) {
+	n.nameLock.Lock()
+	defer n.nameLock.Unlock()
+	if n.firewallName != firewall_name {
+		glog.Infof("Changing firewall name from %v to %v", n.firewallName, firewall_name)
+		n.firewallName = firewall_name
+	}
+}
+
 // GetClusterName returns the UID/name of this cluster.
 func (n *Namer) GetClusterName() string {
 	n.nameLock.Lock()
@@ -130,6 +142,18 @@ func (n *Namer) SetClusterName(name string) {
 	return n.clusterName
 }
 
+// GetFirewallName returns the firewall name of this cluster.
+func (n *Namer) GetFirewallName() string {
+	n.nameLock.Lock()
+	defer n.nameLock.Unlock()
+	// Retain backwards compatible behavior where firewallName == clusterName.
+	if n.firewallName == "" {
+		return n.clusterName
+	} else {
+		return n.firewallName
+	}
+}
+
 // Truncate truncates the given key to a GCE length limit.
 func (n *Namer) Truncate(key string) string {
 	if len(key) > nameLenLimit {
@@ -216,12 +240,12 @@ func (n *Namer) IGName() string {
 
 // FrSuffix constructs the glbc specific suffix for the FirewallRule.
 func (n *Namer) FrSuffix() string {
-	clusterName := n.GetClusterName()
+	firewallName := n.GetFirewallName()
 	// The entire cluster only needs a single firewall rule.
-	if clusterName == "" {
+	if firewallName == "" {
 		return globalFirewallSuffix
 	}
-	return n.Truncate(fmt.Sprintf("%v%v%v", globalFirewallSuffix, clusterNameDelimiter, clusterName))
+	return n.Truncate(fmt.Sprintf("%v%v%v", globalFirewallSuffix, clusterNameDelimiter, firewallName))
 }
 
 // FrName constructs the full firewall rule name, this is the name assigned by
diff --git a/controllers/nginx/Changelog.md b/controllers/nginx/Changelog.md
index ef43ba96f..761cda127 100644
--- a/controllers/nginx/Changelog.md
+++ b/controllers/nginx/Changelog.md
@@ -1,5 +1,51 @@
 Changelog
 
+### 0.9-beta.2
+
+**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2`
+
+*New Features:*
+
+- New configuration flag `proxy-set-headers` that allows setting custom headers before sending traffic to backends. [Example here](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-headers/nginx)
+- Disable the access_log directive globally using `disable-access-log: "true"` in the configuration ConfigMap.
+- Sticky sessions per Ingress rule using the annotation `ingress.kubernetes.io/affinity`.
+  [Example here](https://github.com/kubernetes/ingress/tree/master/examples/affinity/cookie/nginx)
+
+*Changes:*
+
+- [X] [#300](https://github.com/kubernetes/ingress/pull/300) Change nginx variable to use in filter of access_log
+- [X] [#296](https://github.com/kubernetes/ingress/pull/296) Fix rewrite regex to match the start of the URL and not a substring
+- [X] [#293](https://github.com/kubernetes/ingress/pull/293) Update makefile gcloud docker command
+- [X] [#290](https://github.com/kubernetes/ingress/pull/290) Update nginx version in ingress controller to 1.11.10
+- [X] [#286](https://github.com/kubernetes/ingress/pull/286) Add logs to help debugging and simplify default upstream configuration
+- [X] [#285](https://github.com/kubernetes/ingress/pull/285) Added a Node StoreLister type
+- [X] [#281](https://github.com/kubernetes/ingress/pull/281) Add chmod up directory tree for world read/execute on directories
+- [X] [#279](https://github.com/kubernetes/ingress/pull/279) fix wrong link in the file of examples/README.md
+- [X] [#275](https://github.com/kubernetes/ingress/pull/275) Pass headers to custom error backend
+- [X] [#272](https://github.com/kubernetes/ingress/pull/272) Fix error getting class information from Ingress annotations
+- [X] [#268](https://github.com/kubernetes/ingress/pull/268) minor: Fix typo in nginx README
+- [X] [#265](https://github.com/kubernetes/ingress/pull/265) Fix rewrite annotation parser
+- [X] [#262](https://github.com/kubernetes/ingress/pull/262) Add nginx README and configuration docs back
+- [X] [#261](https://github.com/kubernetes/ingress/pull/261) types.go: fix typo in godoc
+- [X] [#258](https://github.com/kubernetes/ingress/pull/258) Nginx sticky annotations
+- [X] [#255](https://github.com/kubernetes/ingress/pull/255) Adds support for disabling access_log globally
+- [X] [#247](https://github.com/kubernetes/ingress/pull/247) Fix wrong URL in nginx ingress configuration
+- [X] [#246](https://github.com/kubernetes/ingress/pull/246) Add support for custom proxy headers using a ConfigMap
+- [X] [#244](https://github.com/kubernetes/ingress/pull/244) Add information about cors annotation
+- [X] [#241](https://github.com/kubernetes/ingress/pull/241) correct a spell mistake
+- [X] [#232](https://github.com/kubernetes/ingress/pull/232) Change searchs with searches
+- [X] [#231](https://github.com/kubernetes/ingress/pull/231) Add information about proxy_protocol in port 442
+- [X] [#228](https://github.com/kubernetes/ingress/pull/228) Fix worker check issue
+- [X] [#227](https://github.com/kubernetes/ingress/pull/227) proxy_protocol on ssl_passthrough listener
+- [X] [#223](https://github.com/kubernetes/ingress/pull/223) Fix panic if a tempfile cannot be created
+- [X] [#220](https://github.com/kubernetes/ingress/pull/220) Fixes for minikube usage instructions.
+- [X] [#219](https://github.com/kubernetes/ingress/pull/219) Fix typo, add a couple of links.
+- [X] [#218](https://github.com/kubernetes/ingress/pull/218) Improve links from CONTRIBUTING.
+- [X] [#217](https://github.com/kubernetes/ingress/pull/217) Fix an e2e link.
+- [X] [#212](https://github.com/kubernetes/ingress/pull/212) Simplify code to obtain TCP or UDP services
+- [X] [#208](https://github.com/kubernetes/ingress/pull/208) Fix nil HTTP field
+- [X] [#198](https://github.com/kubernetes/ingress/pull/198) Add an example for static-ip and deployment
+
+
 ### 0.9-beta.1
 
 **Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1`
diff --git a/controllers/nginx/Makefile b/controllers/nginx/Makefile
index 65eb4af98..bf22ee4ab 100644
--- a/controllers/nginx/Makefile
+++ b/controllers/nginx/Makefile
@@ -3,9 +3,10 @@ all: push
 BUILDTAGS=
 
 # Use the 0.0 tag for testing, it shouldn't clobber any release builds
-RELEASE?=0.9.0-beta.1
+RELEASE?=0.9.0-beta.2
 PREFIX?=gcr.io/google_containers/nginx-ingress-controller
 GOOS?=linux
+DOCKER?=gcloud docker --
 
 REPO_INFO=$(shell git config --get remote.origin.url)
 
@@ -20,11 +21,11 @@ build: clean
 	-ldflags "-s -w -X ${PKG}/pkg/version.RELEASE=${RELEASE} -X ${PKG}/pkg/version.COMMIT=${COMMIT} -X ${PKG}/pkg/version.REPO=${REPO_INFO}" \
 	-o rootfs/nginx-ingress-controller ${PKG}/pkg/cmd/controller
 
-container: build
-	docker build -t $(PREFIX):$(RELEASE) rootfs
+container:
+	$(DOCKER) build --pull -t $(PREFIX):$(RELEASE) rootfs
 
 push: container
-	gcloud docker push $(PREFIX):$(RELEASE)
+	$(DOCKER) push $(PREFIX):$(RELEASE)
 
 fmt:
 	@echo "+ $@"
diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md
index 43127725d..7b4d228fe 100644
--- a/controllers/nginx/README.md
+++ b/controllers/nginx/README.md
@@ -1,2 +1,463 @@
-
 # Nginx Ingress Controller
+
+This is an nginx Ingress controller that uses [ConfigMap](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/configmap.md) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works.
+
+## Contents
+* [Conventions](#conventions)
+* [Requirements](#requirements)
+* [Dry running](#try-running-the-ingress-controller)
+* [Deployment](#deployment)
+* [HTTP](#http)
+* [HTTPS](#https)
+  * [Default SSL Certificate](#default-ssl-certificate)
+  * [HTTPS enforcement](#server-side-https-enforcement)
+  * [HSTS](#http-strict-transport-security)
+  * [Kube-Lego](#automated-certificate-management-with-kube-lego)
+* [TCP Services](#exposing-tcp-services)
+* [UDP Services](#exposing-udp-services)
+* [Proxy Protocol](#proxy-protocol)
+* [NGINX customization](configuration.md)
+* [Custom errors](#custom-errors)
+* [NGINX status page](#nginx-status-page)
+* [Running multiple ingress controllers](#running-multiple-ingress-controllers)
+* [Running on Cloudproviders](#running-on-cloudproviders)
+* [Disabling NGINX ingress controller](#disabling-nginx-ingress-controller)
+* [Log format](#log-format)
+* [Local cluster](#local-cluster)
+* [Debug & Troubleshooting](#debug--troubleshooting)
+* [Limitations](#limitations)
+* [Why endpoints and not services?](#why-endpoints-and-not-services)
+* [NGINX Notes](#nginx-notes)
+
+## Conventions
+
+Anytime we reference a tls secret, we mean (x509, pem encoded, RSA 2048, etc).
You can generate such a certificate with:
+`openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}"`
+and create the secret via `kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}`
+
+
+
+## Requirements
+- Default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server)
+
+
+## Try running the Ingress controller
+
+Before deploying the controller to production you might want to run it outside the cluster and observe it.
+
+```console
+$ make controller
+$ mkdir /etc/nginx-ssl
+$ ./nginx-ingress-controller --running-in-cluster=false --default-backend-service=kube-system/default-http-backend
+```
+
+## Deployment
+
+First create a default backend:
+```
+$ kubectl create -f examples/default-backend.yaml
+$ kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend
+```
+
+Loadbalancers are created via a ReplicationController or DaemonSet:
+
+```
+$ kubectl create -f examples/default/rc-default.yaml
+```
+
+## HTTP
+
+First we need to deploy an application to publish. To keep this simple we will use the [echoheaders app](https://github.com/kubernetes/contrib/blob/master/ingress/echoheaders/echo-app.yaml) that just returns information about the HTTP request as output:
+```
+kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.4 --replicas=1 --port=8080
+```
+
+Now we expose the same application in two different services (so we can create different Ingress rules):
+```
+kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
+kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y
+```
+
+Next we create a couple of Ingress rules (a sketch of these rules appears at the end of this section):
+```
+kubectl create -f examples/ingress.yaml
+```
+
+We check that the Ingress rules are defined:
+```
+$ kubectl get ing
+NAME      RULE          BACKEND   ADDRESS
+echomap   -
+          foo.bar.com
+          /foo          echoheaders-x:80
+          bar.baz.com
+          /bar          echoheaders-y:80
+          /foo          echoheaders-x:80
+```
+
+Before deploying the Ingress controller we need a default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server):
+```
+kubectl create -f examples/default-backend.yaml
+kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend
+```
+
+Check that NGINX is running with the defined Ingress rules:
+
+```
+$ LBIP=$(kubectl get node `kubectl get po -l name=nginx-ingress-lb --template '{{range .items}}{{.spec.nodeName}}{{end}}'` --template '{{range $i, $n := .status.addresses}}{{if eq $n.type "ExternalIP"}}{{$n.address}}{{end}}{{end}}')
+$ curl $LBIP/foo -H 'Host: foo.bar.com'
+```
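+
+For reference, a sketch of the rules created above, reconstructed from the `kubectl get ing` output; the `examples/ingress.yaml` file in the repo is the authoritative version:
+
+```
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: echomap
+spec:
+  rules:
+  - host: foo.bar.com
+    http:
+      paths:
+      - path: /foo
+        backend:
+          serviceName: echoheaders-x
+          servicePort: 80
+  - host: bar.baz.com
+    http:
+      paths:
+      - path: /bar
+        backend:
+          serviceName: echoheaders-y
+          servicePort: 80
+      - path: /foo
+        backend:
+          serviceName: echoheaders-x
+          servicePort: 80
+```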
+
+## HTTPS
+
+You can secure an Ingress by specifying a secret that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. This controller supports SNI.
+The TLS secret must contain keys named tls.crt and tls.key that contain the certificate and private key to use for TLS, e.g.:
+
+```
+apiVersion: v1
+data:
+  tls.crt: base64 encoded cert
+  tls.key: base64 encoded key
+kind: Secret
+metadata:
+  name: foo-secret
+  namespace: default
+type: kubernetes.io/tls
+```
+
+Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS:
+
+```
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: no-rules-map
+spec:
+  tls:
+  - secretName: foo-secret
+  backend:
+    serviceName: s1
+    servicePort: 80
+```
+Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/examples/sni/nginx/test.sh) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different from the name of the certificate.
+
+Check the [example](examples/tls/README.md)
+
+### Default SSL Certificate
+
+NGINX provides the option [server name](http://nginx.org/en/docs/http/server_names.html) as a catch-all in case of requests that do not match one of the configured server names. This configuration works without issues for HTTP traffic. In the case of HTTPS, NGINX requires a certificate. For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned case.
+If this flag is not provided NGINX will use a self-signed certificate.
+
+Running without the flag `--default-ssl-certificate`:
+
+```
+$ curl -v https://10.2.78.7:443 -k
+* Rebuilt URL to: https://10.2.78.7:443/
+* Trying 10.2.78.4...
+* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
+* ALPN, offering http/1.1
+* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
+* successfully set certificate verify locations:
+* CAfile: /etc/ssl/certs/ca-certificates.crt
+ CApath: /etc/ssl/certs
+* TLSv1.2 (OUT), TLS header, Certificate Status (22):
+* TLSv1.2 (OUT), TLS handshake, Client hello (1):
+* TLSv1.2 (IN), TLS handshake, Server hello (2):
+* TLSv1.2 (IN), TLS handshake, Certificate (11):
+* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
+* TLSv1.2 (IN), TLS handshake, Server finished (14):
+* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
+* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
+* TLSv1.2 (OUT), TLS handshake, Finished (20):
+* TLSv1.2 (IN), TLS change cipher, Client hello (1):
+* TLSv1.2 (IN), TLS handshake, Finished (20):
+* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
+* ALPN, server accepted to use http/1.1
+* Server certificate:
+* subject: CN=foo.bar.com
+* start date: Apr 13 00:50:56 2016 GMT
+* expire date: Apr 13 00:50:56 2017 GMT
+* issuer: CN=foo.bar.com
+* SSL certificate verify result: self signed certificate (18), continuing anyway.
+> GET / HTTP/1.1
+> Host: 10.2.78.7
+> User-Agent: curl/7.47.1
+> Accept: */*
+>
+< HTTP/1.1 404 Not Found
+< Server: nginx/1.11.1
+< Date: Thu, 21 Jul 2016 15:38:46 GMT
+< Content-Type: text/html
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
+<
+The page you're looking for could not be found.
+
+* Connection #0 to host 10.2.78.7 left intact
+```
+
+Specifying `--default-ssl-certificate=default/foo-tls`:
+
+```
+core@localhost ~ $ curl -v https://10.2.78.7:443 -k
+* Rebuilt URL to: https://10.2.78.7:443/
+* Trying 10.2.78.7...
+* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
+* ALPN, offering http/1.1
+* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
+* successfully set certificate verify locations:
+* CAfile: /etc/ssl/certs/ca-certificates.crt
+ CApath: /etc/ssl/certs
+* TLSv1.2 (OUT), TLS header, Certificate Status (22):
+* TLSv1.2 (OUT), TLS handshake, Client hello (1):
+* TLSv1.2 (IN), TLS handshake, Server hello (2):
+* TLSv1.2 (IN), TLS handshake, Certificate (11):
+* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
+* TLSv1.2 (IN), TLS handshake, Server finished (14):
+* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
+* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
+* TLSv1.2 (OUT), TLS handshake, Finished (20):
+* TLSv1.2 (IN), TLS change cipher, Client hello (1):
+* TLSv1.2 (IN), TLS handshake, Finished (20):
+* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
+* ALPN, server accepted to use http/1.1
+* Server certificate:
+* subject: CN=foo.bar.com
+* start date: Apr 13 00:50:56 2016 GMT
+* expire date: Apr 13 00:50:56 2017 GMT
+* issuer: CN=foo.bar.com
+* SSL certificate verify result: self signed certificate (18), continuing anyway.
+> GET / HTTP/1.1
+> Host: 10.2.78.7
+> User-Agent: curl/7.47.1
+> Accept: */*
+>
+< HTTP/1.1 404 Not Found
+< Server: nginx/1.11.1
+< Date: Mon, 18 Jul 2016 21:02:59 GMT
+< Content-Type: text/html
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
+<
+The page you're looking for could not be found.
+
+* Connection #0 to host 10.2.78.7 left intact
+```
+
+
+### Server-side HTTPS enforcement
+
+By default the controller redirects (301) to HTTPS if TLS is enabled for that Ingress. If you want to disable that behaviour globally, you can use `ssl-redirect: "false"` in the NGINX config map.
+
+To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource.
+
+
+### HTTP Strict Transport Security
+
+HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header, that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
+
+By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule.
+
+To disable this behavior use `hsts=false` in the NGINX config map.
+
+
+### Automated Certificate Management with Kube-Lego
+
+[Kube-Lego] automatically requests missing or expired certificates from [Let's Encrypt] by monitoring ingress resources and their referenced secrets. To enable this for an ingress resource you have to add an annotation:
+
+```
+kubectl annotate ing ingress-demo kubernetes.io/tls-acme="true"
+```
+
+To set up Kube-Lego you can take a look at this [full example]. The first version to fully support Kube-Lego is nginx Ingress controller 0.8.
+
+[full example]:https://github.com/jetstack/kube-lego/tree/master/examples
+[Kube-Lego]:https://github.com/jetstack/kube-lego
+[Let's Encrypt]:https://letsencrypt.org
+
+## Exposing TCP services
+
+Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>`. It is possible to use a number or the name of the port.
+
+The next example shows how to expose the service `example-go` running in the namespace `default` on port `8080` using the external port `9000`:
+```
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: tcp-configmap-example
+data:
+  9000: "default/example-go:8080"
+```
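+
+The controller is pointed at this ConfigMap through its launch flags; a minimal sketch of the relevant container args in the controller's deployment (assuming the ConfigMap above is created in the `default` namespace):
+
+```
+args:
+- /nginx-ingress-controller
+# default backend as deployed earlier in this guide
+- --default-backend-service=kube-system/default-http-backend
+# namespace/name of the ConfigMap holding the TCP port mappings
+- --tcp-services-configmap=default/tcp-configmap-example
+```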
+
+Please check the [tcp services](examples/tcp/README.md) example.
+
+## Exposing UDP services
+
+Since 1.9.13 NGINX provides [UDP Load Balancing](https://www.nginx.com/blog/announcing-udp-load-balancing/).
+
+Ingress does not support UDP services (yet). For this reason this Ingress controller uses the flag `--udp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>`. It is possible to use a number or the name of the port.
+
+The next example shows how to expose the service `kube-dns` running in the namespace `kube-system` on port `53` using the external port `53`:
+```
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: udp-configmap-example
+data:
+  53: "kube-system/kube-dns:53"
+```
+
+Please check the [udp services](examples/udp/README.md) example.
+
+## Proxy Protocol
+
+If you are using an L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP address. To prevent this you could use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic; this sends the connection details before forwarding the actual TCP connection itself.
+
+Amongst others, [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support the Proxy Protocol.
+
+Please check the [proxy-protocol](examples/proxy-protocol/) example.
+
+
+### Custom errors
+
+In case of an error in a request, the body of the response is obtained from the `default backend`. Each request to the default backend includes two headers:
+- `X-Code` indicates the HTTP status code
+- `X-Format` the value of the `Accept` header
+
+Using these two headers it is possible to use a custom backend service like [this one](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) that inspects each request and returns a custom error page with the format expected by the client. Please check the example [custom-errors](examples/custom-errors/README.md).
+
+### NGINX status page
+
+The ngx_http_stub_status_module module provides access to basic status information. This module is active by default at the URL `/nginx_status`.
+This controller provides an alternative to this module using the third-party module [nginx-module-vts](https://github.com/vozlt/nginx-module-vts).
+To use this module just provide a config map with the key `enable-vts-status=true`. The URL is exposed on port 8080.
+Please check the example `example/rc-default.yaml`.
+
+![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter")
+
+To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`
+
+### Running multiple ingress controllers
+
+If you're running multiple ingress controllers, or running on a cloudprovider that natively handles
+ingress, you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses
+that you would like this controller to claim. Not specifying the annotation will lead to multiple
+ingress controllers claiming the same ingress. Specifying the wrong value will result in all ingress
+controllers ignoring the ingress. Running multiple ingress controllers in the same cluster was not
+supported in Kubernetes versions < 1.3.
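+
+For example, a minimal sketch of an Ingress claimed by this controller (resource and service names are illustrative):
+
+```
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: echomap
+  annotations:
+    # claim this Ingress for the NGINX controller
+    kubernetes.io/ingress.class: "nginx"
+spec:
+  backend:
+    serviceName: echoheaders-x
+    servicePort: 80
+```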
+
+### Running on Cloudproviders
+
+If you're running this ingress controller on a cloudprovider, you should assume the provider also has a native
+Ingress controller and specify the ingress.class annotation as indicated in this section.
+In addition to this, you will need to add a firewall rule for each port this controller is listening on, i.e. :80 and :443.
+
+### Disabling NGINX ingress controller
+
+Setting the annotation `kubernetes.io/ingress.class` to any value other than "nginx" or the empty string will force the NGINX Ingress controller to ignore your Ingress. Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller.
+
+### Log format
+
+The default configuration uses a custom logging format to add additional information about upstreams:
+
+```
+    log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
+        '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" '
+        '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status';
+```
+
+Sources:
+ - [upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables)
+ - [embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables)
+
+Description:
+- `$proxy_protocol_addr`: if PROXY protocol is enabled
+- `$remote_addr`: if PROXY protocol is disabled (default)
+- `$proxy_add_x_forwarded_for`: the `X-Forwarded-For` client request header field with the $remote_addr variable appended to it, separated by a comma
+- `$remote_user`: user name supplied with the Basic authentication
+- `$time_local`: local time in the Common Log Format
+- `$request`: full original request line
+- `$status`: response status
+- `$body_bytes_sent`: number of bytes sent to a client, not counting the response header
+- `$http_referer`: value of the Referer header
+- `$http_user_agent`: value of User-Agent header
+- `$request_length`: request length (including request line, header, and request body)
+- `$request_time`: time elapsed since the first bytes were read from the client
+- `$proxy_upstream_name`: name of the upstream. The format is `upstream-<namespace>-<service name>-<service port>`
+- `$upstream_addr`: keeps the IP address and port, or the path to the UNIX-domain socket of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas
+- `$upstream_response_length`: keeps the length of the response obtained from the upstream server
+- `$upstream_response_time`: keeps time spent on receiving the response from the upstream server; the time is kept in seconds with millisecond resolution
+- `$upstream_status`: keeps status code of the response obtained from the upstream server
+
+### Local cluster
+
+Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) it is possible to start a local Kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/running-locally.md) for more details.
+
+Use of `hostNetwork: true` in the ingress controller is required so that it can fall back to localhost:8080 to reach the apiserver if every other client creation check fails (e.g. service account not present, kubeconfig doesn't exist, no master env vars...).
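+
+A minimal sketch of where this setting sits in the controller's pod template (image tag and names are illustrative; the rest of the spec is elided):
+
+```
+spec:
+  template:
+    spec:
+      # use the node's network namespace so localhost:8080 reaches the apiserver
+      hostNetwork: true
+      containers:
+      - name: nginx-ingress-lb
+        image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
+```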
+
+
+### Debug & Troubleshooting
+
+Using the flag `--v=XX` it is possible to increase the level of logging.
+In particular:
+- `--v=2` shows details using `diff` about the changes in the configuration in nginx
+
+```
+I0316 12:24:37.581267       1 utils.go:148] NGINX configuration diff a//etc/nginx/nginx.conf b//etc/nginx/nginx.conf
+I0316 12:24:37.581356       1 utils.go:149] --- /tmp/922554809  2016-03-16 12:24:37.000000000 +0000
++++ /tmp/079811012  2016-03-16 12:24:37.000000000 +0000
+@@ -235,7 +235,6 @@
+
+    upstream default-echoheadersx {
+        least_conn;
+-        server 10.2.112.124:5000;
+        server 10.2.208.50:5000;
+
+    }
+I0316 12:24:37.610073       1 command.go:69] change in configuration detected. Reloading...
+```
+
+- `--v=3` shows details about the service, Ingress rule and endpoint changes, and it dumps the nginx configuration in JSON format
+- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html)
+
+
+
+*These issues were encountered in past versions of Kubernetes:*
+
+[1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md):
+
+* The setup-files.sh file in hyperkube does not provide the 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather than 10.0.0.1. This results in nginx-third-party-lb appearing to get stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs. Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong IP). To verify this, add zeros to the end of initialDelaySeconds and timeoutSeconds and reload the RC; docker will log this error before Kubernetes kills the container.
+  * To fix the above, setup-files.sh must be patched before the cluster is initialized (refer to https://github.com/kubernetes/kubernetes/pull/21504)
+
+
+### Limitations
+
+- Ingress rules for TLS require the definition of the field `host`
+
+
+### Why endpoints and not services
+
+The NGINX ingress controller does not use [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) and allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT.
+
+
+### NGINX notes
+
+Since `gcr.io/google_containers/nginx-slim:0.8` NGINX contains the following patches:
+- Dynamic TLS record size [nginx__dynamic_tls_records.patch](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/)
+NGINX provides the parameter `ssl_buffer_size` to adjust the size of the buffer. The default value in NGINX is 16KB. The ingress controller changes the default to 4KB. This improves the [TLS Time To First Byte (TTTFB)](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/), but the size is fixed. This patch adapts the size of the buffer to the content being served, helping to improve the perceived latency.
+
+- Add SPDY support back to Nginx with HTTP/2 [nginx_1_9_15_http2_spdy.patch](https://github.com/cloudflare/sslconfig/pull/36)
+When NGINX introduced HTTP/2 support, SPDY support was removed. This patch adds SPDY back without compromising HTTP/2 support, using the Application-Layer Protocol Negotiation (ALPN) or Next Protocol Negotiation (NPN) Transport Layer Security (TLS) extension to negotiate which protocols the server and client support:
+```
+openssl s_client -servername www.my-site.com -connect www.my-site.com:443 -nextprotoneg ''
+CONNECTED(00000003)
+Protocols advertised by server: h2, spdy/3.1, http/1.1
+```
diff --git a/controllers/nginx/configuration.md b/controllers/nginx/configuration.md
index 776644b6f..e8ffd81ff 100644
--- a/controllers/nginx/configuration.md
+++ b/controllers/nginx/configuration.md
@@ -15,6 +15,7 @@ * [Websockets](#websockets)
 * [Optimizing TLS Time To First Byte (TTTFB)](#optimizing-tls-time-to-first-byte-tttfb)
 * [Retries in non-idempotent methods](#retries-in-non-idempotent-methods)
+* [Custom max body size](#custom-max-body-size)
 
 ### Customizing NGINX
 
@@ -39,14 +40,22 @@ The following annotations are supported:
 |Name |type|
 |---------------------------|------|
 |[ingress.kubernetes.io/add-base-url](#rewrite)|true or false|
+|[ingress.kubernetes.io/affinity](#session-affinity)|true or false|
 |[ingress.kubernetes.io/auth-realm](#authentication)|string|
 |[ingress.kubernetes.io/auth-secret](#authentication)|string|
 |[ingress.kubernetes.io/auth-type](#authentication)|basic or digest|
 |[ingress.kubernetes.io/auth-url](#external-authentication)|string|
+|[ingress.kubernetes.io/auth-tls-secret](#certificate-authentication)|string|
+|[ingress.kubernetes.io/auth-tls-verify-depth](#certificate-authentication)|number|
+|[ingress.kubernetes.io/enable-cors](#enable-cors)|true or false|
+|[ingress.kubernetes.io/force-ssl-redirect](#server-side-https-enforcement-through-redirect)|true or false|
 |[ingress.kubernetes.io/limit-connections](#rate-limiting)|number|
 |[ingress.kubernetes.io/limit-rps](#rate-limiting)|number|
+|[ingress.kubernetes.io/proxy-body-size](#custom-max-body-size)|string|
 |[ingress.kubernetes.io/rewrite-target](#rewrite)|URI|
 |[ingress.kubernetes.io/secure-backends](#secure-backends)|true or false|
+|[ingress.kubernetes.io/session-cookie-name](#cookie-affinity)|string|
+|[ingress.kubernetes.io/session-cookie-hash](#cookie-affinity)|string|
 |[ingress.kubernetes.io/ssl-redirect](#server-side-https-enforcement-through-redirect)|true or false|
 |[ingress.kubernetes.io/upstream-max-fails](#custom-nginx-upstream-checks)|number|
 |[ingress.kubernetes.io/upstream-fail-timeout](#custom-nginx-upstream-checks)|number|
@@ -66,7 +75,7 @@ In addition to the built-in functions provided by the Go package the following f
 
   - empty: returns true if the specified parameter (string) is empty
   - contains: [strings.Contains](https://golang.org/pkg/strings/#Contains)
-  - hasPrefix: [strings.HasPrefix](https://golang.org/pkg/strings/#Contains)
+  - hasPrefix: [strings.HasPrefix](https://golang.org/pkg/strings/#HasPrefix)
   - hasSuffix: [strings.HasSuffix](https://golang.org/pkg/strings/#HasSuffix)
   - toUpper: [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper)
   - toLower: [strings.ToLower](https://golang.org/pkg/strings/#ToLower)
@@ -118,8 +127,33 @@ The secret must be created in the same namespace as the Ingress rule.
 ingress.kubernetes.io/auth-realm: "realm string"
 ```
 
-Please check the [auth](examples/auth/README.md) example.
+Please check the [auth](/examples/auth/nginx/README.md) example.
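+
+For instance, a rough sketch of wiring these annotations together (secret layout follows the linked example; names and host are illustrative):
+
+```
+$ htpasswd -c auth foo
+$ kubectl create secret generic basic-auth --from-file=auth
+```
+
+```
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: ingress-with-auth
+  annotations:
+    ingress.kubernetes.io/auth-type: basic
+    # secret created above, in the same namespace as the Ingress
+    ingress.kubernetes.io/auth-secret: basic-auth
+    ingress.kubernetes.io/auth-realm: "Authentication Required"
+spec:
+  rules:
+  - host: foo.bar.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: echoheaders-x
+          servicePort: 80
+```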
+
+### Certificate Authentication
+
+It's possible to enable certificate-based authentication using additional annotations in the Ingress rule.
+
+The annotations are:
+
+```
+ingress.kubernetes.io/auth-tls-secret: secretName
+```
+
+The name of the secret that contains the full Certificate Authority chain that is enabled to authenticate against this Ingress. It's composed of namespace/secretName.
+
+```
+ingress.kubernetes.io/auth-tls-verify-depth
+```
+
+The validation depth between the provided client certificate and the Certification Authority chain.
+
+Please check the [tls-auth](/examples/auth/client-certs/nginx/README.md) example.
+
+
+### Enable CORS
+
+To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule, add the annotation `ingress.kubernetes.io/enable-cors: "true"`. This will add a section in the server location enabling this functionality.
+For more information please check https://enable-cors.org/server_nginx.html
 
 ### External Authentication
 
@@ -130,7 +164,7 @@ Additionally it is possible to set `ingress.kubernetes.io/auth-method` to specif
 ingress.kubernetes.io/auth-url: "URL to the authentication service"
 ```
 
-Please check the [external-auth](examples/external-auth/README.md) example.
+Please check the [external-auth](/examples/auth/external-auth/nginx/README.md) example.
 
 ### Rewrite
 
@@ -165,6 +199,8 @@ By default the controller redirects (301) to `HTTPS` if TLS is enabled for that
 
 To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource.
 
+When using SSL offloading outside of the cluster (e.g. AWS ELB) it may be useful to enforce a redirect to `HTTPS` even when there is no TLS certificate available. This can be achieved by using the `ingress.kubernetes.io/force-ssl-redirect: "true"` annotation in the particular resource.
+
 
 ### Whitelist source range
 
@@ -174,7 +210,22 @@ To configure this setting globally for all Ingress rules, the `whitelist-source-
 
 *Note:* Adding an annotation to an Ingress rule overrides any global restriction.
 
-Please check the [whitelist](examples/whitelist/README.md) example.
+Please check the [whitelist](/examples/affinity/cookie/nginx/README.md) example.
+
+
+### Session Affinity
+
+The annotation `ingress.kubernetes.io/affinity` enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server.
+
+
+#### Cookie affinity
+If you use the `cookie` type you can also specify the name of the cookie that will be used to route the requests with the annotation `ingress.kubernetes.io/session-cookie-name`. The default is to create a cookie named 'route'.
+
+In case of NGINX the annotation `ingress.kubernetes.io/session-cookie-hash` defines which algorithm will be used to 'hash' the used upstream. The default value is `md5` and possible values are `md5`, `sha1` and `index`.
+The `index` option is not hashed; an in-memory index is used instead. It's quicker and the overhead is smaller. Warning: the matching against the upstream servers list is inconsistent, so at reload, if the upstream servers have changed, index values are not guaranteed to correspond to the same server as before! USE IT WITH CAUTION and only if you need to!
+
+In NGINX this feature is implemented by the third-party module [nginx-sticky-module-ng](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng). The workflow used to define which upstream server will be used is explained [here](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng/raw/08a395c66e425540982c00482f55034e1fee67b6/docs/sticky.pdf).
+
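+A sketch of an Ingress using cookie affinity (resource names and host are illustrative):
+
+```
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: sticky-ingress
+  annotations:
+    ingress.kubernetes.io/affinity: "cookie"
+    # name of the routing cookie; 'route' is also the default
+    ingress.kubernetes.io/session-cookie-name: "route"
+    ingress.kubernetes.io/session-cookie-hash: "sha1"
+spec:
+  rules:
+  - host: stickyingress.example.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: echoheaders-x
+          servicePort: 80
+```
+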
 
 ### **Allowed parameters in configuration ConfigMap**
 
@@ -188,6 +239,12 @@ Setting at least one code also enables [proxy_intercept_errors](http://nginx.org
 
 Example usage: `custom-http-errors: 404,415`
 
+**disable-access-log:** Disables the access log for the entire Ingress controller. This is 'false' by default.
+
+
+**disable-ipv6:** Disable listening on IPv6. This is 'false' by default.
+
+
 **enable-dynamic-tls-records:** Enables dynamically sized TLS records to improve time-to-first-byte. Enabled by default. See [CloudFlare's blog](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency) for more information.
 
@@ -231,6 +288,12 @@ http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout
 
 **proxy-connect-timeout:** Sets the timeout for [establishing a connection with a proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout). It should be noted that this timeout cannot usually exceed 75 seconds.
 
+**proxy-cookie-domain:** Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the “Set-Cookie” header fields of a proxied server response.
+
+
+**proxy-cookie-path:** Sets a text that [should be changed in the path attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path) of the “Set-Cookie” header fields of a proxied server response.
+
+
 **proxy-read-timeout:** Sets the timeout in seconds for [reading a response from the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout). The timeout is set only between two successive read operations, not for the transmission of the whole response.
 
@@ -273,7 +336,7 @@ The recommendation above prioritizes algorithms that provide perfect [forward se
 
 Please check the [Mozilla SSL Configuration Generator](https://mozilla.github.io/server-side-tls/ssl-config-generator/).
 
-**ssl-dh-param:** sets the Base64 string that contains Diffie-Hellman key to help with "Perfect Forward Secrecy".
+**ssl-dh-param:** Sets the name of the secret that contains a Diffie-Hellman key to help with "Perfect Forward Secrecy".
 https://www.openssl.org/docs/manmaster/apps/dhparam.html
 https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam
 http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam
@@ -348,7 +411,11 @@ The following table shows the options, the default value and a description.
 |keep-alive|"75"|
 |map-hash-bucket-size|"64"|
 |max-worker-connections|"16384"|
+|proxy-body-size|same as body-size|
+|proxy-buffer-size|"4k"|
 |proxy-connect-timeout|"5"|
+|proxy-cookie-domain|"off"|
+|proxy-cookie-path|"off"|
 |proxy-read-timeout|"60"|
 |proxy-real-ip-cidr|0.0.0.0/0|
 |proxy-send-timeout|"60"|
@@ -388,3 +455,15 @@ NGINX provides the configuration option [ssl_buffer_size](http://nginx.org/en/do
 
 Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error.
 The previous behavior can be restored using `retry-non-idempotent=true` in the configuration ConfigMap.
+
+
+### Custom max body size
+For NGINX, a 413 error will be returned to the client when the size in a request exceeds the maximum allowed size of the client request body.
This size can be configured by the parameter [`client_max_body_size`](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size). + +To configure this setting globally for all Ingress rules, the `proxy-body-size` value may be set in the NGINX ConfigMap. + +To use custom values in an Ingress rule define these annotation: + +``` +ingress.kubernetes.io/proxy-body-size: 8m +``` diff --git a/controllers/nginx/pkg/cmd/controller/metrics.go b/controllers/nginx/pkg/cmd/controller/metrics.go index a4e8b562d..b803e6756 100644 --- a/controllers/nginx/pkg/cmd/controller/metrics.go +++ b/controllers/nginx/pkg/cmd/controller/metrics.go @@ -17,217 +17,79 @@ limitations under the License. package main import ( - "path/filepath" - "github.com/golang/glog" - - common "github.com/ncabatoff/process-exporter" - "github.com/ncabatoff/process-exporter/proc" "github.com/prometheus/client_golang/prometheus" + + "k8s.io/ingress/controllers/nginx/pkg/metric/collector" ) -type exeMatcher struct { - name string - args []string -} +const ( + ngxStatusPath = "/internal_nginx_status" + ngxVtsPath = "/nginx_status/format/json" +) -func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) { - if len(nacl.Cmdline) == 0 { - return false, "" +func (n *NGINXController) setupMonitor(sm statusModule) { + csm := n.statusModule + if csm != sm { + glog.Infof("changing prometheus collector from %v to %v", csm, sm) + n.stats.stop(csm) + n.stats.start(sm) + n.statusModule = sm } - cmd := filepath.Base(nacl.Cmdline[0]) - return em.name == cmd, "" } -func (n *NGINXController) setupMonitor(args []string) { - pc, err := newProcessCollector(true, exeMatcher{"nginx", args}) +type statsCollector struct { + process prometheus.Collector + basic collector.Stopable + vts collector.Stopable + + namespace string + watchClass string +} + +func (s *statsCollector) stop(sm statusModule) { + switch sm { + case defaultStatusModule: + s.basic.Stop() + prometheus.Unregister(s.basic) + break + case vtsStatusModule: + s.vts.Stop() + prometheus.Unregister(s.vts) + break + } +} + +func (s *statsCollector) start(sm statusModule) { + switch sm { + case defaultStatusModule: + s.basic = collector.NewNginxStatus(s.namespace, s.watchClass, ngxHealthPort, ngxStatusPath) + prometheus.Register(s.basic) + break + case vtsStatusModule: + s.vts = collector.NewNGINXVTSCollector(s.namespace, s.watchClass, ngxHealthPort, ngxVtsPath) + prometheus.Register(s.vts) + break + } +} + +func newStatsCollector(ns, class, binary string) *statsCollector { + glog.Infof("starting new nginx stats collector for Ingress controller running in namespace %v (class %v)", ns, class) + pc, err := collector.NewNamedProcess(true, collector.BinaryNameMatcher{ + Name: "nginx", + Binary: binary, + }) if err != nil { glog.Fatalf("unexpected error registering nginx collector: %v", err) } err = prometheus.Register(pc) if err != nil { - glog.Warningf("unexpected error registering nginx collector: %v", err) - } -} - -var ( - numprocsDesc = prometheus.NewDesc( - "nginx_num_procs", - "number of processes", - nil, nil) - - cpuSecsDesc = prometheus.NewDesc( - "nginx_cpu_seconds_total", - "Cpu usage in seconds", - nil, nil) - - readBytesDesc = prometheus.NewDesc( - "nginx_read_bytes_total", - "number of bytes read", - nil, nil) - - writeBytesDesc = prometheus.NewDesc( - "nginx_write_bytes_total", - "number of bytes written", - nil, nil) - - memResidentbytesDesc = prometheus.NewDesc( - "nginx_resident_memory_bytes", - "number of bytes of memory in use", - nil, nil) - - 
memVirtualbytesDesc = prometheus.NewDesc( - "nginx_virtual_memory_bytes", - "number of bytes of memory in use", - nil, nil) - - startTimeDesc = prometheus.NewDesc( - "nginx_oldest_start_time_seconds", - "start time in seconds since 1970/01/01", - nil, nil) - - activeDesc = prometheus.NewDesc( - "nginx_active_connections", - "total number of active connections", - nil, nil) - - acceptedDesc = prometheus.NewDesc( - "nginx_accepted_connections", - "total number of accepted client connections", - nil, nil) - - handledDesc = prometheus.NewDesc( - "nginx_handled_connections", - "total number of handled connections", - nil, nil) - - requestsDesc = prometheus.NewDesc( - "nginx_total_requests", - "total number of client requests", - nil, nil) - - readingDesc = prometheus.NewDesc( - "nginx_current_reading_connections", - "current number of connections where nginx is reading the request header", - nil, nil) - - writingDesc = prometheus.NewDesc( - "nginx_current_writing_connections", - "current number of connections where nginx is writing the response back to the client", - nil, nil) - - waitingDesc = prometheus.NewDesc( - "nginx_current_waiting_connections", - "current number of idle client connections waiting for a request", - nil, nil) -) - -type ( - scrapeRequest struct { - results chan<- prometheus.Metric - done chan struct{} - } - - namedProcessCollector struct { - scrapeChan chan scrapeRequest - *proc.Grouper - fs *proc.FS - } -) - -func newProcessCollector( - children bool, - n common.MatchNamer) (*namedProcessCollector, error) { - - fs, err := proc.NewFS("/proc") - if err != nil { - return nil, err - } - p := &namedProcessCollector{ - scrapeChan: make(chan scrapeRequest), - Grouper: proc.NewGrouper(children, n), - fs: fs, - } - _, err = p.Update(p.fs.AllProcs()) - if err != nil { - return nil, err - } - - go p.start() - - return p, nil -} - -// Describe implements prometheus.Collector. -func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- cpuSecsDesc - ch <- numprocsDesc - ch <- readBytesDesc - ch <- writeBytesDesc - ch <- memResidentbytesDesc - ch <- memVirtualbytesDesc - ch <- startTimeDesc -} - -// Collect implements prometheus.Collector. 
-func (p *namedProcessCollector) Collect(ch chan<- prometheus.Metric) { - req := scrapeRequest{results: ch, done: make(chan struct{})} - p.scrapeChan <- req - <-req.done -} - -func (p *namedProcessCollector) start() { - for req := range p.scrapeChan { - ch := req.results - p.scrape(ch) - req.done <- struct{}{} - } -} - -func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) { - s, err := getNginxStatus() - if err != nil { - glog.Warningf("unexpected error obtaining nginx status info: %v", err) - return - } - - ch <- prometheus.MustNewConstMetric(activeDesc, - prometheus.GaugeValue, float64(s.Active)) - ch <- prometheus.MustNewConstMetric(acceptedDesc, - prometheus.GaugeValue, float64(s.Accepted)) - ch <- prometheus.MustNewConstMetric(handledDesc, - prometheus.GaugeValue, float64(s.Handled)) - ch <- prometheus.MustNewConstMetric(requestsDesc, - prometheus.GaugeValue, float64(s.Requests)) - ch <- prometheus.MustNewConstMetric(readingDesc, - prometheus.GaugeValue, float64(s.Reading)) - ch <- prometheus.MustNewConstMetric(writingDesc, - prometheus.GaugeValue, float64(s.Writing)) - ch <- prometheus.MustNewConstMetric(waitingDesc, - prometheus.GaugeValue, float64(s.Waiting)) - - _, err = p.Update(p.fs.AllProcs()) - if err != nil { - glog.Warningf("unexpected error obtaining nginx process info: %v", err) - return - } - - for gname, gcounts := range p.Groups() { - glog.Infof("%v", gname) - glog.Infof("%v", gcounts) - ch <- prometheus.MustNewConstMetric(numprocsDesc, - prometheus.GaugeValue, float64(gcounts.Procs)) - ch <- prometheus.MustNewConstMetric(memResidentbytesDesc, - prometheus.GaugeValue, float64(gcounts.Memresident)) - ch <- prometheus.MustNewConstMetric(memVirtualbytesDesc, - prometheus.GaugeValue, float64(gcounts.Memvirtual)) - ch <- prometheus.MustNewConstMetric(startTimeDesc, - prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix())) - ch <- prometheus.MustNewConstMetric(cpuSecsDesc, - prometheus.CounterValue, gcounts.Cpu) - ch <- prometheus.MustNewConstMetric(readBytesDesc, - prometheus.CounterValue, float64(gcounts.ReadBytes)) - ch <- prometheus.MustNewConstMetric(writeBytesDesc, - prometheus.CounterValue, float64(gcounts.WriteBytes)) + glog.Fatalf("unexpected error registering nginx collector: %v", err) + } + + return &statsCollector{ + namespace: ns, + watchClass: class, + process: pc, } } diff --git a/controllers/nginx/pkg/cmd/controller/nginx.go b/controllers/nginx/pkg/cmd/controller/nginx.go index 9e2bb64d2..1bd7ede60 100644 --- a/controllers/nginx/pkg/cmd/controller/nginx.go +++ b/controllers/nginx/pkg/cmd/controller/nginx.go @@ -29,27 +29,35 @@ import ( "time" "github.com/golang/glog" - "github.com/mitchellh/mapstructure" + "github.com/spf13/pflag" "k8s.io/kubernetes/pkg/api" + "strings" + "k8s.io/ingress/controllers/nginx/pkg/config" ngx_template "k8s.io/ingress/controllers/nginx/pkg/template" "k8s.io/ingress/controllers/nginx/pkg/version" "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/defaults" + "k8s.io/ingress/core/pkg/net/ssl" ) +type statusModule string + const ( ngxHealthPort = 18080 ngxHealthPath = "/healthz" - ngxStatusPath = "/internal_nginx_status" + + defaultStatusModule statusModule = "default" + vtsStatusModule statusModule = "vts" ) var ( - tmplPath = "/etc/nginx/template/nginx.tmpl" - cfgPath = "/etc/nginx/nginx.conf" - binary = "/usr/sbin/nginx" + tmplPath = "/etc/nginx/template/nginx.tmpl" + cfgPath = "/etc/nginx/nginx.conf" + binary = "/usr/sbin/nginx" + defIngressClass = "nginx" ) // newNGINXController creates a new 
NGINX Ingress controller. @@ -60,7 +68,7 @@ func newNGINXController() ingress.Controller { if ngx == "" { ngx = binary } - n := NGINXController{ + n := &NGINXController{ binary: ngx, configmap: &api.ConfigMap{}, } @@ -92,7 +100,7 @@ Error loading new template : %v go n.Start() - return ingress.Controller(&n) + return ingress.Controller(n) } // NGINXController ... @@ -101,11 +109,21 @@ type NGINXController struct { configmap *api.ConfigMap + storeLister ingress.StoreLister + binary string + + cmdArgs []string + + watchClass string + namespace string + + stats *statsCollector + statusModule statusModule } // Start start a new NGINX master process running in foreground. -func (n NGINXController) Start() { +func (n *NGINXController) Start() { glog.Info("starting NGINX process...") done := make(chan error, 1) @@ -132,10 +150,10 @@ NGINX master process died (%v): %v // we wait until the workers are killed for { conn, err := net.DialTimeout("tcp", "127.0.0.1:80", 1*time.Second) - if err == nil { - conn.Close() + if err != nil { break } + conn.Close() time.Sleep(1 * time.Second) } // start a new nginx master process @@ -152,7 +170,7 @@ func (n *NGINXController) start(cmd *exec.Cmd, done chan error) { return } - n.setupMonitor(cmd.Args) + n.cmdArgs = cmd.Args go func() { done <- cmd.Wait() @@ -172,6 +190,7 @@ func (n NGINXController) Reload(data []byte) ([]byte, bool, error) { } o, e := exec.Command(n.binary, "-s", "reload").CombinedOutput() + return o, true, e } @@ -182,23 +201,7 @@ func (n NGINXController) BackendDefaults() defaults.Backend { return d.Backend } - return n.backendDefaults() -} - -func (n *NGINXController) backendDefaults() defaults.Backend { - d := config.NewDefault() - config := &mapstructure.DecoderConfig{ - Metadata: nil, - WeaklyTypedInput: true, - Result: &d, - TagName: "json", - } - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - glog.Warningf("unexpected error merging defaults: %v", err) - } - decoder.Decode(n.configmap.Data) - return d.Backend + return ngx_template.ReadConfig(n.configmap.Data).Backend } // isReloadRequired check if the new configuration file is different @@ -215,6 +218,7 @@ func (n NGINXController) isReloadRequired(data []byte) bool { } if !bytes.Equal(src, data) { + tmpfile, err := ioutil.TempFile("", "nginx-cfg-diff") if err != nil { glog.Errorf("error creating temporal file: %s", err) @@ -236,6 +240,7 @@ func (n NGINXController) isReloadRequired(data []byte) bool { glog.Infof("NGINX configuration diff\n") glog.Infof("%v", string(diffOutput)) } + os.Remove(tmpfile.Name()) return len(diffOutput) > 0 } return false @@ -251,6 +256,28 @@ func (n NGINXController) Info() *ingress.BackendInfo { } } +// OverrideFlags customize NGINX controller flags +func (n *NGINXController) OverrideFlags(flags *pflag.FlagSet) { + ic, _ := flags.GetString("ingress-class") + wc, _ := flags.GetString("watch-namespace") + + if ic == "" { + ic = defIngressClass + } + + if ic != defIngressClass { + glog.Warningf("only Ingress with class %v will be processed by this ingress controller", ic) + } + + flags.Set("ingress-class", ic) + n.stats = newStatsCollector(ic, wc, n.binary) +} + +// DefaultIngressClass just return the default ingress class +func (n NGINXController) DefaultIngressClass() string { + return defIngressClass +} + // testTemplate checks if the NGINX configuration inside the byte array is valid // running the command "nginx -t" using a temporal file. 
func (n NGINXController) testTemplate(cfg []byte) error { @@ -259,7 +286,10 @@ func (n NGINXController) testTemplate(cfg []byte) error { return err } defer tmpfile.Close() - ioutil.WriteFile(tmpfile.Name(), cfg, 0644) + err = ioutil.WriteFile(tmpfile.Name(), cfg, 0644) + if err != nil { + return err + } out, err := exec.Command(n.binary, "-t", "-c", tmpfile.Name()).CombinedOutput() if err != nil { // this error is different from the rest because it must be clear why nginx is not working @@ -276,11 +306,16 @@ Error: %v return nil } -// SetConfig ... +// SetConfig sets the configured configmap func (n *NGINXController) SetConfig(cmap *api.ConfigMap) { n.configmap = cmap } +// SetListers sets the configured store listers in the generic ingress controller +func (n *NGINXController) SetListers(lister ingress.StoreLister) { + n.storeLister = lister +} + // OnUpdate is called by syncQueue in https://github.com/aledbf/ingress-controller/blob/master/pkg/ingress/controller/controller.go#L82 // periodically to keep the configuration in sync. // @@ -301,6 +336,13 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er cfg := ngx_template.ReadConfig(n.configmap.Data) + // we need to check if the status module configuration changed + if cfg.EnableVtsStatus { + n.setupMonitor(vtsStatusModule) + } else { + n.setupMonitor(defaultStatusModule) + } + // NGINX cannot resize the has tables used to store server names. // For this reason we check if the defined size defined is correct // for the FQDN defined in the ingress rules adjusting the value @@ -324,18 +366,66 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er // and we leave some room to avoid consuming all the FDs available maxOpenFiles := (sysctlFSFileMax() / cfg.WorkerProcesses) - 1024 - return n.t.Write(config.TemplateConfig{ + setHeaders := map[string]string{} + if cfg.ProxySetHeaders != "" { + cmap, exists, err := n.storeLister.ConfigMap.GetByKey(cfg.ProxySetHeaders) + if err != nil { + glog.Warningf("unexpected error reading configmap %v: %v", cfg.ProxySetHeaders, err) + } + + if exists { + setHeaders = cmap.(*api.ConfigMap).Data + } + } + + sslDHParam := "" + if cfg.SSLDHParam != "" { + secretName := cfg.SSLDHParam + s, exists, err := n.storeLister.Secret.GetByKey(secretName) + if err != nil { + glog.Warningf("unexpected error reading secret %v: %v", secretName, err) + } + + if exists { + secret := s.(*api.Secret) + nsSecName := strings.Replace(secretName, "/", "-", -1) + + dh, ok := secret.Data["dhparam.pem"] + if ok { + pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh) + if err != nil { + glog.Warningf("unexpected error adding or updating dhparam %v file: %v", nsSecName, err) + } else { + sslDHParam = pemFileName + } + } + } + } + + cfg.SSLDHParam = sslDHParam + + content, err := n.t.Write(config.TemplateConfig{ + ProxySetHeaders: setHeaders, MaxOpenFiles: maxOpenFiles, BacklogSize: sysctlSomaxconn(), Backends: ingressCfg.Backends, PassthroughBackends: ingressCfg.PassthroughBackends, Servers: ingressCfg.Servers, TCPBackends: ingressCfg.TCPEndpoints, - UDPBackends: ingressCfg.UPDEndpoints, + UDPBackends: ingressCfg.UDPEndpoints, HealthzURI: ngxHealthPath, CustomErrors: len(cfg.CustomHTTPErrors) > 0, Cfg: cfg, - }, n.testTemplate) + }) + if err != nil { + return nil, err + } + + if err := n.testTemplate(content); err != nil { + return nil, err + } + + return content, nil } // Name returns the healthcheck name diff --git a/controllers/nginx/pkg/cmd/controller/status.go 
b/controllers/nginx/pkg/cmd/controller/status.go deleted file mode 100644 index bfa1c383b..000000000 --- a/controllers/nginx/pkg/cmd/controller/status.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strconv" -) - -var ( - ac = regexp.MustCompile(`Active connections: (\d+)`) - sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`) - reading = regexp.MustCompile(`Reading: (\d+)`) - writing = regexp.MustCompile(`Writing: (\d+)`) - waiting = regexp.MustCompile(`Waiting: (\d+)`) -) - -type nginxStatus struct { - // Active total number of active connections - Active int - // Accepted total number of accepted client connections - Accepted int - // Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit). - Handled int - // Requests total number of client requests. - Requests int - // Reading current number of connections where nginx is reading the request header. - Reading int - // Writing current number of connections where nginx is writing the response back to the client. - Writing int - // Waiting current number of idle client connections waiting for a request. - Waiting int -} - -func getNginxStatus() (*nginxStatus, error) { - resp, err := http.DefaultClient.Get(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx status page (%v)", err) - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, fmt.Errorf("unexpected error scraping nginx status page (status %v)", resp.StatusCode) - } - - return parse(string(data)), nil -} - -func parse(data string) *nginxStatus { - acr := ac.FindStringSubmatch(data) - sahrr := sahr.FindStringSubmatch(data) - readingr := reading.FindStringSubmatch(data) - writingr := writing.FindStringSubmatch(data) - waitingr := waiting.FindStringSubmatch(data) - - return &nginxStatus{ - toInt(acr, 1), - toInt(sahrr, 1), - toInt(sahrr, 2), - toInt(sahrr, 3), - toInt(readingr, 1), - toInt(writingr, 1), - toInt(waitingr, 1), - } -} - -func toInt(data []string, pos int) int { - if len(data) == 0 { - return 0 - } - if pos > len(data) { - return 0 - } - if v, err := strconv.Atoi(data[pos]); err == nil { - return v - } - return 0 -} diff --git a/controllers/nginx/pkg/config/config.go b/controllers/nginx/pkg/config/config.go index c3dc11331..ffd54676a 100644 --- a/controllers/nginx/pkg/config/config.go +++ b/controllers/nginx/pkg/config/config.go @@ -17,6 +17,7 @@ limitations under the License. 
package config import ( + "fmt" "runtime" "github.com/golang/glog" @@ -46,6 +47,10 @@ const ( gzipTypes = "application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component" + logFormatUpstream = `%v - [$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status"` + + logFormatStream = `[$time_local] $protocol [$ssl_preread_server_name] [$stream_upstream] $status $bytes_sent $bytes_received $session_time` + // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size // Sets the size of the buffer used for sending data. // 4k helps NGINX to improve TLS Time To First Byte (TTTFB) @@ -88,10 +93,12 @@ type Configuration struct { // http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size ClientHeaderBufferSize string `json:"client-header-buffer-size"` - // EnableSPDY enables spdy and use ALPN and NPN to advertise the availability of the two protocols - // https://blog.cloudflare.com/open-sourcing-our-nginx-http-2-spdy-code - // By default this is enabled - EnableSPDY bool `json:"enable-spdy"` + // DisableAccessLog disables the Access Log globally from NGINX ingress controller + //http://nginx.org/en/docs/http/ngx_http_log_module.html + DisableAccessLog bool `json:"disable-access-log,omitempty"` + + // DisableIpv6 disable listening on ipv6 address + DisableIpv6 bool `json:"disable-ipv6,omitempty"` // EnableStickySessions enabled sticky sessions using cookies // https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng @@ -114,6 +121,14 @@ type Configuration struct { // Log levels above are listed in the order of increasing severity ErrorLogLevel string `json:"error-log-level,omitempty"` + // https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size + // HTTP2MaxFieldSize Limits the maximum size of an HPACK-compressed request header field + HTTP2MaxFieldSize string `json:"http2-max-field-size,omitempty"` + + // https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size + // HTTP2MaxHeaderSize Limits the maximum size of the entire request header list after HPACK decompression + HTTP2MaxHeaderSize string `json:"http2-max-header-size,omitempty"` + // Enables or disables the header HSTS in servers running SSL HSTS bool `json:"hsts,omitempty"` @@ -139,6 +154,14 @@ type Configuration struct { // Default: 4 8k LargeClientHeaderBuffers string `json:"large-client-header-buffers"` + // Customize upstream log_format + // http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format + LogFormatUpstream string `json:"log-format-upstream,omitempty"` + + // Customize stream log_format + // http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format + LogFormatStream string `json:"log-format-stream,omitempty"` + // Maximum number of simultaneous connections that can be opened by each worker process // http://nginx.org/en/docs/ngx_core_module.html#worker_connections MaxWorkerConnections int `json:"max-worker-connections,omitempty"` @@ -152,6 +175,9 @@ type Configuration struct { // of your external load balancer ProxyRealIPCIDR string `json:"proxy-real-ip-cidr,omitempty"` + // Sets the 
name of the configmap that contains the headers to pass to the backend + ProxySetHeaders string `json:"proxy-set-headers,omitempty"` + // Maximum size of the server names hash tables used in server names, map directive’s values, // MIME types, names of request header strings, etcd. // http://nginx.org/en/docs/hash.html @@ -173,7 +199,7 @@ type Configuration struct { // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers SSLCiphers string `json:"ssl-ciphers,omitempty"` - // Base64 string that contains Diffie-Hellman key to help with "Perfect Forward Secrecy" + // The secret that contains Diffie-Hellman key to help with "Perfect Forward Secrecy" // https://www.openssl.org/docs/manmaster/apps/dhparam.html // https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam @@ -234,14 +260,17 @@ func NewDefault() Configuration { cfg := Configuration{ ClientHeaderBufferSize: "1k", EnableDynamicTLSRecords: true, - EnableSPDY: false, ErrorLogLevel: errorLevel, + HTTP2MaxFieldSize: "4k", + HTTP2MaxHeaderSize: "16k", HSTS: true, HSTSIncludeSubdomains: true, HSTSMaxAge: hstsMaxAge, GzipTypes: gzipTypes, KeepAlive: 75, LargeClientHeaderBuffers: "4 8k", + LogFormatStream: logFormatStream, + LogFormatUpstream: logFormatUpstream, MaxWorkerConnections: 16384, MapHashBucketSize: 64, ProxyRealIPCIDR: defIPCIDR, @@ -255,7 +284,6 @@ func NewDefault() Configuration { SSLSessionCacheSize: sslSessionCacheSize, SSLSessionTickets: true, SSLSessionTimeout: sslSessionTimeout, - UseProxyProtocol: false, UseGzip: true, WorkerProcesses: runtime.NumCPU(), VtsStatusZoneSize: "10m", @@ -266,11 +294,12 @@ func NewDefault() Configuration { ProxyReadTimeout: 60, ProxySendTimeout: 60, ProxyBufferSize: "4k", + ProxyCookieDomain: "off", + ProxyCookiePath: "off", SSLRedirect: true, CustomHTTPErrors: []int{}, WhitelistSourceRange: []string{}, SkipAccessLogURLs: []string{}, - UsePortInRedirects: false, }, } @@ -281,15 +310,30 @@ func NewDefault() Configuration { return cfg } +// BuildLogFormatUpstream format the log_format upstream using +// proxy_protocol_addr as remote client address if UseProxyProtocol +// is enabled. +func (cfg Configuration) BuildLogFormatUpstream() string { + if cfg.LogFormatUpstream == logFormatUpstream { + if cfg.UseProxyProtocol { + return fmt.Sprintf(cfg.LogFormatUpstream, "$proxy_protocol_addr") + } + return fmt.Sprintf(cfg.LogFormatUpstream, "$remote_addr") + } + + return cfg.LogFormatUpstream +} + // TemplateConfig contains the nginx configuration to render the file nginx.conf type TemplateConfig struct { + ProxySetHeaders map[string]string MaxOpenFiles int BacklogSize int Backends []*ingress.Backend PassthroughBackends []*ingress.SSLPassthroughBackend Servers []*ingress.Server - TCPBackends []*ingress.Location - UDPBackends []*ingress.Location + TCPBackends []ingress.L4Service + UDPBackends []ingress.L4Service HealthzURI string CustomErrors bool Cfg Configuration diff --git a/controllers/nginx/pkg/config/config_test.go b/controllers/nginx/pkg/config/config_test.go new file mode 100644 index 000000000..359cb1306 --- /dev/null +++ b/controllers/nginx/pkg/config/config_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "testing" +) + +func TestBuildLogFormatUpstream(t *testing.T) { + + testCases := []struct { + useProxyProtocol bool // use proxy protocol + curLogFormat string + expected string + }{ + {true, logFormatUpstream, fmt.Sprintf(logFormatUpstream, "$proxy_protocol_addr")}, + {false, logFormatUpstream, fmt.Sprintf(logFormatUpstream, "$remote_addr")}, + {true, "my-log-format", "my-log-format"}, + {false, "john-log-format", "john-log-format"}, + } + + for _, testCase := range testCases { + cfg := NewDefault() + cfg.UseProxyProtocol = testCase.useProxyProtocol + cfg.LogFormatUpstream = testCase.curLogFormat + result := cfg.BuildLogFormatUpstream() + if result != testCase.expected { + t.Errorf(" expected %v but return %v", testCase.expected, result) + } + } +} diff --git a/controllers/nginx/pkg/metric/collector/nginx.go b/controllers/nginx/pkg/metric/collector/nginx.go new file mode 100644 index 000000000..944eb920d --- /dev/null +++ b/controllers/nginx/pkg/metric/collector/nginx.go @@ -0,0 +1,160 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collector + +import ( + "fmt" + + "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" +) + +type ( + nginxStatusCollector struct { + scrapeChan chan scrapeRequest + ngxHealthPort int + ngxVtsPath string + data *nginxStatusData + } + + nginxStatusData struct { + active *prometheus.Desc + accepted *prometheus.Desc + handled *prometheus.Desc + requests *prometheus.Desc + reading *prometheus.Desc + writing *prometheus.Desc + waiting *prometheus.Desc + } +) + +func buildNS(namespace, class string) string { + if namespace == "" { + namespace = "all" + } + if class == "" { + class = "all" + } + + return fmt.Sprintf("%v_%v", namespace, class) +} + +// NewNginxStatus returns a new prometheus collector the default nginx status module +func NewNginxStatus(namespace, class string, ngxHealthPort int, ngxVtsPath string) Stopable { + p := nginxStatusCollector{ + scrapeChan: make(chan scrapeRequest), + ngxHealthPort: ngxHealthPort, + ngxVtsPath: ngxVtsPath, + } + + ns := buildNS(namespace, class) + + p.data = &nginxStatusData{ + active: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "active_connections"), + "total number of active connections", + nil, nil), + + accepted: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "accepted_connections"), + "total number of accepted client connections", + nil, nil), + + handled: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "handled_connections"), + "total number of handled connections", + nil, nil), + + requests: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "total_requests"), + "total number of client requests", + nil, nil), + + reading: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "current_reading_connections"), + "current number of connections where nginx is reading the request header", + nil, nil), + + writing: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "current_writing_connections"), + "current number of connections where nginx is writing the response back to the client", + nil, nil), + + waiting: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "current_waiting_connections"), + "current number of idle client connections waiting for a request", + nil, nil), + } + + go p.start() + + return p +} + +// Describe implements prometheus.Collector. +func (p nginxStatusCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- p.data.active + ch <- p.data.accepted + ch <- p.data.handled + ch <- p.data.requests + ch <- p.data.reading + ch <- p.data.writing + ch <- p.data.waiting +} + +// Collect implements prometheus.Collector. 
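+// Collect hands the scrape off to the goroutine started in start():
+// the request is queued on scrapeChan and the call blocks on done, so
+// concurrent Prometheus scrapes are serialized. A hypothetical wiring
+// sketch (port and path are illustrative, not package defaults):
+//
+//	c := NewNginxStatus("default", "nginx", 18080, "/internal_nginx_status")
+//	prometheus.MustRegister(c)
+//	defer c.Stop()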
+func (p nginxStatusCollector) Collect(ch chan<- prometheus.Metric) {
+ req := scrapeRequest{results: ch, done: make(chan struct{})}
+ p.scrapeChan <- req
+ <-req.done
+}
+
+func (p nginxStatusCollector) start() {
+ for req := range p.scrapeChan {
+ ch := req.results
+ p.scrape(ch)
+ req.done <- struct{}{}
+ }
+}
+
+func (p nginxStatusCollector) Stop() {
+ close(p.scrapeChan)
+}
+
+// scrape fetches the nginx status page, parses it and publishes the
+// counters as prometheus metrics
+func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) {
+ s, err := getNginxStatus(p.ngxHealthPort, p.ngxVtsPath)
+ if err != nil {
+ glog.Warningf("unexpected error obtaining nginx status info: %v", err)
+ return
+ }
+
+ ch <- prometheus.MustNewConstMetric(p.data.active,
+ prometheus.GaugeValue, float64(s.Active))
+ ch <- prometheus.MustNewConstMetric(p.data.accepted,
+ prometheus.GaugeValue, float64(s.Accepted))
+ ch <- prometheus.MustNewConstMetric(p.data.handled,
+ prometheus.GaugeValue, float64(s.Handled))
+ ch <- prometheus.MustNewConstMetric(p.data.requests,
+ prometheus.GaugeValue, float64(s.Requests))
+ ch <- prometheus.MustNewConstMetric(p.data.reading,
+ prometheus.GaugeValue, float64(s.Reading))
+ ch <- prometheus.MustNewConstMetric(p.data.writing,
+ prometheus.GaugeValue, float64(s.Writing))
+ ch <- prometheus.MustNewConstMetric(p.data.waiting,
+ prometheus.GaugeValue, float64(s.Waiting))
+}
diff --git a/controllers/nginx/pkg/metric/collector/process.go b/controllers/nginx/pkg/metric/collector/process.go
new file mode 100644
index 000000000..8e9f3ec3f
--- /dev/null
+++ b/controllers/nginx/pkg/metric/collector/process.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package collector
+
+import (
+ "path/filepath"
+
+ "github.com/golang/glog"
+ common "github.com/ncabatoff/process-exporter"
+ "github.com/ncabatoff/process-exporter/proc"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// BinaryNameMatcher matches a process by the base name of the configured binary
+type BinaryNameMatcher struct {
+ Name string
+ Binary string
+}
+
+// MatchAndName returns false if the match failed, otherwise
+// true and the resulting name.
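+// Only the empty-cmdline guard inspects the process itself: the match
+// compares the configured Name against the base name of the configured
+// Binary path, and the returned group name is always empty, so all
+// matched processes land in a single group. Illustrative use (the
+// binary path is an assumption, not a value from this package):
+//
+//	m := BinaryNameMatcher{Name: "nginx", Binary: "/usr/sbin/nginx"}
+//	ok, _ := m.MatchAndName(common.NameAndCmdline{Cmdline: []string{"nginx"}})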
+func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) { + if len(nacl.Cmdline) == 0 { + return false, "" + } + cmd := filepath.Base(em.Binary) + return em.Name == cmd, "" +} + +type namedProcessData struct { + numProcs *prometheus.Desc + cpuSecs *prometheus.Desc + readBytes *prometheus.Desc + writeBytes *prometheus.Desc + memResidentbytes *prometheus.Desc + memVirtualbytes *prometheus.Desc + startTime *prometheus.Desc +} + +type namedProcess struct { + *proc.Grouper + + scrapeChan chan scrapeRequest + fs *proc.FS + data namedProcessData +} + +// NewNamedProcess returns a new prometheus collector for the nginx process +func NewNamedProcess(children bool, mn common.MatchNamer) (prometheus.Collector, error) { + fs, err := proc.NewFS("/proc") + if err != nil { + return nil, err + } + p := namedProcess{ + scrapeChan: make(chan scrapeRequest), + Grouper: proc.NewGrouper(children, mn), + fs: fs, + } + _, err = p.Update(p.fs.AllProcs()) + if err != nil { + return nil, err + } + + p.data = namedProcessData{ + numProcs: prometheus.NewDesc( + "num_procs", + "number of processes", + nil, nil), + + cpuSecs: prometheus.NewDesc( + "cpu_seconds_total", + "Cpu usage in seconds", + nil, nil), + + readBytes: prometheus.NewDesc( + "read_bytes_total", + "number of bytes read", + nil, nil), + + writeBytes: prometheus.NewDesc( + "write_bytes_total", + "number of bytes written", + nil, nil), + + memResidentbytes: prometheus.NewDesc( + "resident_memory_bytes", + "number of bytes of memory in use", + nil, nil), + + memVirtualbytes: prometheus.NewDesc( + "virtual_memory_bytes", + "number of bytes of memory in use", + nil, nil), + + startTime: prometheus.NewDesc( + "oldest_start_time_seconds", + "start time in seconds since 1970/01/01", + nil, nil), + } + + go p.start() + + return p, nil +} + +// Describe implements prometheus.Collector. +func (p namedProcess) Describe(ch chan<- *prometheus.Desc) { + ch <- p.data.cpuSecs + ch <- p.data.numProcs + ch <- p.data.readBytes + ch <- p.data.writeBytes + ch <- p.data.memResidentbytes + ch <- p.data.memVirtualbytes + ch <- p.data.startTime +} + +// Collect implements prometheus.Collector. 
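+// As in the other collectors of this package, the actual scrape runs
+// in the goroutine started by start(); Collect only performs the
+// channel handshake with it. A sketch of the assumed registration
+// (requires a readable /proc filesystem):
+//
+//	col, err := NewNamedProcess(true, BinaryNameMatcher{Name: "nginx", Binary: "/usr/sbin/nginx"})
+//	if err == nil {
+//		prometheus.MustRegister(col)
+//	}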
+func (p namedProcess) Collect(ch chan<- prometheus.Metric) { + req := scrapeRequest{results: ch, done: make(chan struct{})} + p.scrapeChan <- req + <-req.done +} + +func (p namedProcess) start() { + for req := range p.scrapeChan { + ch := req.results + p.scrape(ch) + req.done <- struct{}{} + } +} + +func (p namedProcess) Stop() { + close(p.scrapeChan) +} + +func (p namedProcess) scrape(ch chan<- prometheus.Metric) { + _, err := p.Update(p.fs.AllProcs()) + if err != nil { + glog.Warningf("unexpected error obtaining nginx process info: %v", err) + return + } + + for _, gcounts := range p.Groups() { + ch <- prometheus.MustNewConstMetric(p.data.numProcs, + prometheus.GaugeValue, float64(gcounts.Procs)) + ch <- prometheus.MustNewConstMetric(p.data.memResidentbytes, + prometheus.GaugeValue, float64(gcounts.Memresident)) + ch <- prometheus.MustNewConstMetric(p.data.memVirtualbytes, + prometheus.GaugeValue, float64(gcounts.Memvirtual)) + ch <- prometheus.MustNewConstMetric(p.data.startTime, + prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix())) + ch <- prometheus.MustNewConstMetric(p.data.cpuSecs, + prometheus.CounterValue, gcounts.Cpu) + ch <- prometheus.MustNewConstMetric(p.data.readBytes, + prometheus.CounterValue, float64(gcounts.ReadBytes)) + ch <- prometheus.MustNewConstMetric(p.data.writeBytes, + prometheus.CounterValue, float64(gcounts.WriteBytes)) + } +} diff --git a/controllers/nginx/pkg/metric/collector/scrape.go b/controllers/nginx/pkg/metric/collector/scrape.go new file mode 100644 index 000000000..a078b2859 --- /dev/null +++ b/controllers/nginx/pkg/metric/collector/scrape.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collector + +import "github.com/prometheus/client_golang/prometheus" + +// Stopable defines a prometheus collector that can be stopped +type Stopable interface { + prometheus.Collector + Stop() +} + +type scrapeRequest struct { + results chan<- prometheus.Metric + done chan struct{} +} diff --git a/controllers/nginx/pkg/metric/collector/status.go b/controllers/nginx/pkg/metric/collector/status.go new file mode 100644 index 000000000..1a0fcaf0e --- /dev/null +++ b/controllers/nginx/pkg/metric/collector/status.go @@ -0,0 +1,225 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collector + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + + "github.com/golang/glog" +) + +var ( + ac = regexp.MustCompile(`Active connections: (\d+)`) + sahr = regexp.MustCompile(`(\d+)\s(\d+)\s(\d+)`) + reading = regexp.MustCompile(`Reading: (\d+)`) + writing = regexp.MustCompile(`Writing: (\d+)`) + waiting = regexp.MustCompile(`Waiting: (\d+)`) +) + +type basicStatus struct { + // Active total number of active connections + Active int + // Accepted total number of accepted client connections + Accepted int + // Handled total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached (for example, the worker_connections limit). + Handled int + // Requests total number of client requests. + Requests int + // Reading current number of connections where nginx is reading the request header. + Reading int + // Writing current number of connections where nginx is writing the response back to the client. + Writing int + // Waiting current number of idle client connections waiting for a request. + Waiting int +} + +// https://github.com/vozlt/nginx-module-vts +type vts struct { + NginxVersion string `json:"nginxVersion"` + LoadMsec int `json:"loadMsec"` + NowMsec int `json:"nowMsec"` + // Total connections and requests(same as stub_status_module in NGINX) + Connections connections `json:"connections"` + // Traffic(in/out) and request and response counts and cache hit ratio per each server zone + ServerZones map[string]serverZone `json:"serverZones"` + // Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through + // the vhost_traffic_status_filter_by_set_key directive + FilterZones map[string]map[string]filterZone `json:"filterZones"` + // Traffic(in/out) and request and response counts per server in each upstream group + UpstreamZones map[string][]upstreamZone `json:"upstreamZones"` +} + +type serverZone struct { + RequestCounter float64 `json:"requestCounter"` + InBytes float64 `json:"inBytes"` + OutBytes float64 `json:"outBytes"` + Responses response `json:"responses"` + Cache cache `json:"cache"` +} + +type filterZone struct { + RequestCounter float64 `json:"requestCounter"` + InBytes float64 `json:"inBytes"` + OutBytes float64 `json:"outBytes"` + Cache cache `json:"cache"` + Responses response `json:"responses"` +} + +type upstreamZone struct { + Responses response `json:"responses"` + Server string `json:"server"` + RequestCounter float64 `json:"requestCounter"` + InBytes float64 `json:"inBytes"` + OutBytes float64 `json:"outBytes"` + ResponseMsec float64 `json:"responseMsec"` + Weight float64 `json:"weight"` + MaxFails float64 `json:"maxFails"` + FailTimeout float64 `json:"failTimeout"` + Backup BoolToFloat64 `json:"backup"` + Down BoolToFloat64 `json:"down"` +} + +type cache struct { + Miss float64 `json:"miss"` + Bypass float64 `json:"bypass"` + Expired float64 `json:"expired"` + Stale float64 `json:"stale"` + Updating float64 `json:"updating"` + Revalidated float64 `json:"revalidated"` + Hit float64 `json:"hit"` + Scarce float64 `json:"scarce"` +} + +type response struct { + OneXx float64 `json:"1xx"` + TwoXx float64 `json:"2xx"` + TheeXx float64 `json:"3xx"` + FourXx float64 `json:"4xx"` + FiveXx float64 `json:"5xx"` +} + +type connections struct { + Active float64 `json:"active"` + Reading float64 `json:"reading"` + Writing float64 `json:"writing"` + Waiting float64 `json:"waiting"` + Accepted float64 
`json:"accepted"`
+ Handled float64 `json:"handled"`
+ Requests float64 `json:"requests"`
+}
+
+// BoolToFloat64 maps JSON booleans (and 0/1) onto float64 so the value can be exported as a metric
+type BoolToFloat64 float64
+
+// UnmarshalJSON decodes 0/1 and true/false into a BoolToFloat64.
+// The receiver must be a pointer, otherwise the decoded value is lost.
+func (bit *BoolToFloat64) UnmarshalJSON(data []byte) error {
+ asString := string(data)
+ if asString == "1" || asString == "true" {
+ *bit = 1
+ } else if asString == "0" || asString == "false" {
+ *bit = 0
+ } else {
+ return fmt.Errorf("boolean unmarshal error: invalid input %s", asString)
+ }
+ return nil
+}
+
+func getNginxStatus(ngxHealthPort int, ngxStatusPath string) (*basicStatus, error) {
+ url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)
+ glog.V(3).Infof("start scraping url: %v", url)
+
+ data, err := httpBody(url)
+
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
+ }
+
+ return parse(string(data)), nil
+}
+
+func httpBody(url string) ([]byte, error) {
+ resp, err := http.DefaultClient.Get(url)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error scraping nginx: %v", err)
+ }
+ // close the body even if reading it fails
+ defer resp.Body.Close()
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err)
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode)
+ }
+
+ return data, nil
+}
+
+func getNginxVtsMetrics(ngxHealthPort int, ngxVtsPath string) (*vts, error) {
+ url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath)
+ glog.V(3).Infof("start scraping url: %v", url)
+
+ data, err := httpBody(url)
+
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err)
+ }
+
+ var v *vts
+ err = json.Unmarshal(data, &v)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err)
+ }
+ glog.V(3).Infof("scrape returned: %v", v)
+ return v, nil
+}
+
+func parse(data string) *basicStatus {
+ acr := ac.FindStringSubmatch(data)
+ sahrr := sahr.FindStringSubmatch(data)
+ readingr := reading.FindStringSubmatch(data)
+ writingr := writing.FindStringSubmatch(data)
+ waitingr := waiting.FindStringSubmatch(data)
+
+ return &basicStatus{
+ toInt(acr, 1),
+ toInt(sahrr, 1),
+ toInt(sahrr, 2),
+ toInt(sahrr, 3),
+ toInt(readingr, 1),
+ toInt(writingr, 1),
+ toInt(waitingr, 1),
+ }
+}
+
+func toInt(data []string, pos int) int {
+ if len(data) == 0 {
+ return 0
+ }
+ // guard against an out-of-range submatch index
+ if pos >= len(data) {
+ return 0
+ }
+ if v, err := strconv.Atoi(data[pos]); err == nil {
+ return v
+ }
+ return 0
+}
diff --git a/controllers/nginx/pkg/cmd/controller/status_test.go b/controllers/nginx/pkg/metric/collector/status_test.go
similarity index 85%
rename from controllers/nginx/pkg/cmd/controller/status_test.go
rename to controllers/nginx/pkg/metric/collector/status_test.go
index 1dda3a01e..5d3075dae 100644
--- a/controllers/nginx/pkg/cmd/controller/status_test.go
+++ b/controllers/nginx/pkg/metric/collector/status_test.go
@@ -14,35 +14,37 @@ See the License for the specific language governing permissions and
limitations under the License.
*/ -package main +package collector import ( - "reflect" "testing" + + "github.com/kylelemons/godebug/pretty" ) func TestParseStatus(t *testing.T) { tests := []struct { in string - out *nginxStatus + out *basicStatus }{ {`Active connections: 43 server accepts handled requests 7368 7368 10993 Reading: 0 Writing: 5 Waiting: 38`, - &nginxStatus{43, 7368, 7368, 10993, 0, 5, 38}, + &basicStatus{43, 7368, 7368, 10993, 0, 5, 38}, }, {`Active connections: 0 server accepts handled requests 1 7 0 Reading: A Writing: B Waiting: 38`, - &nginxStatus{0, 1, 7, 0, 0, 0, 38}, + &basicStatus{0, 1, 7, 0, 0, 0, 38}, }, } for _, test := range tests { r := parse(test.in) - if !reflect.DeepEqual(r, test.out) { + if diff := pretty.Compare(r, test.out); diff != "" { + t.Logf("%v", diff) t.Fatalf("expected %v but returned %v", test.out, r) } } diff --git a/controllers/nginx/pkg/metric/collector/vts.go b/controllers/nginx/pkg/metric/collector/vts.go new file mode 100644 index 000000000..4d80d66c4 --- /dev/null +++ b/controllers/nginx/pkg/metric/collector/vts.go @@ -0,0 +1,269 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collector + +import ( + "reflect" + + "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" +) + +const system = "nginx" + +type ( + vtsCollector struct { + scrapeChan chan scrapeRequest + ngxHealthPort int + ngxVtsPath string + data *vtsData + } + + vtsData struct { + bytes *prometheus.Desc + cache *prometheus.Desc + connections *prometheus.Desc + response *prometheus.Desc + request *prometheus.Desc + filterZoneBytes *prometheus.Desc + filterZoneResponse *prometheus.Desc + filterZoneCache *prometheus.Desc + upstreamBackup *prometheus.Desc + upstreamBytes *prometheus.Desc + upstreamDown *prometheus.Desc + upstreamFailTimeout *prometheus.Desc + upstreamMaxFails *prometheus.Desc + upstreamResponses *prometheus.Desc + upstreamRequest *prometheus.Desc + upstreamResponseMsec *prometheus.Desc + upstreamWeight *prometheus.Desc + } +) + +// NewNGINXVTSCollector returns a new prometheus collector for the VTS module +func NewNGINXVTSCollector(namespace, class string, ngxHealthPort int, ngxVtsPath string) Stopable { + p := vtsCollector{ + scrapeChan: make(chan scrapeRequest), + ngxHealthPort: ngxHealthPort, + ngxVtsPath: ngxVtsPath, + } + + ns := buildNS(namespace, class) + + p.data = &vtsData{ + bytes: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "bytes_total"), + "Nginx bytes count", + []string{"server_zone", "direction"}, nil), + + cache: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "cache_total"), + "Nginx cache count", + []string{"server_zone", "type"}, nil), + + connections: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "connections_total"), + "Nginx connections count", + []string{"type"}, nil), + + response: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "responses_total"), + "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", + []string{"server_zone", "status_code"}, nil), + 
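+ // requests_total is fed from the per-server-zone requestCounter
+ // reported by the VTS module (see scrapeVts below).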
+ request: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "requests_total"), + "The total number of requested client connections.", + []string{"server_zone"}, nil), + + filterZoneBytes: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "filterzone_bytes_total"), + "Nginx bytes count", + []string{"server_zone", "country", "direction"}, nil), + + filterZoneResponse: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "filterzone_responses_total"), + "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", + []string{"server_zone", "country", "status_code"}, nil), + + filterZoneCache: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "filterzone_cache_total"), + "Nginx cache count", + []string{"server_zone", "country", "type"}, nil), + + upstreamBackup: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_backup"), + "Current backup setting of the server.", + []string{"upstream", "server"}, nil), + + upstreamBytes: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_bytes_total"), + "The total number of bytes sent to this server.", + []string{"upstream", "server", "direction"}, nil), + + upstreamDown: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "vts_upstream_down_total"), + "Current down setting of the server.", + []string{"upstream", "server"}, nil), + + upstreamFailTimeout: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_fail_timeout"), + "Current fail_timeout setting of the server.", + []string{"upstream", "server"}, nil), + + upstreamMaxFails: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_maxfails"), + "Current max_fails setting of the server.", + []string{"upstream", "server"}, nil), + + upstreamResponses: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_responses_total"), + "The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", + []string{"upstream", "server", "status_code"}, nil), + + upstreamRequest: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_requests_total"), + "The total number of client connections forwarded to this server.", + []string{"upstream", "server"}, nil), + + upstreamResponseMsec: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_response_msecs_avg"), + "The average of only upstream response processing times in milliseconds.", + []string{"upstream", "server"}, nil), + + upstreamWeight: prometheus.NewDesc( + prometheus.BuildFQName(system, ns, "upstream_weight"), + "Current upstream weight setting of the server.", + []string{"upstream", "server"}, nil), + } + + go p.start() + + return p +} + +// Describe implements prometheus.Collector. +func (p vtsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- p.data.bytes + ch <- p.data.cache + ch <- p.data.connections + ch <- p.data.request + ch <- p.data.response + ch <- p.data.upstreamBackup + ch <- p.data.upstreamBytes + ch <- p.data.upstreamDown + ch <- p.data.upstreamFailTimeout + ch <- p.data.upstreamMaxFails + ch <- p.data.upstreamRequest + ch <- p.data.upstreamResponseMsec + ch <- p.data.upstreamResponses + ch <- p.data.upstreamWeight + ch <- p.data.filterZoneBytes + ch <- p.data.filterZoneCache + ch <- p.data.filterZoneResponse +} + +// Collect implements prometheus.Collector. 
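+// Collect reuses the scrapeChan/done handshake used by the other
+// collectors in this package. A hedged registration sketch (the VTS
+// JSON path is illustrative and depends on how the module is exposed):
+//
+//	vc := NewNGINXVTSCollector("default", "nginx", 18080, "/nginx_status/format/json")
+//	prometheus.MustRegister(vc)
+//	defer vc.Stop()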
+func (p vtsCollector) Collect(ch chan<- prometheus.Metric) { + req := scrapeRequest{results: ch, done: make(chan struct{})} + p.scrapeChan <- req + <-req.done +} + +func (p vtsCollector) start() { + for req := range p.scrapeChan { + ch := req.results + p.scrapeVts(ch) + req.done <- struct{}{} + } +} + +func (p vtsCollector) Stop() { + close(p.scrapeChan) +} + +// scrapeVts scrape nginx vts metrics +func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) { + nginxMetrics, err := getNginxVtsMetrics(p.ngxHealthPort, p.ngxVtsPath) + if err != nil { + glog.Warningf("unexpected error obtaining nginx status info: %v", err) + return + } + + reflectMetrics(&nginxMetrics.Connections, p.data.connections, ch) + + for name, zones := range nginxMetrics.UpstreamZones { + for pos, value := range zones { + reflectMetrics(&zones[pos].Responses, p.data.upstreamResponses, ch, name, value.Server) + + ch <- prometheus.MustNewConstMetric(p.data.upstreamRequest, + prometheus.CounterValue, zones[pos].RequestCounter, name, value.Server) + ch <- prometheus.MustNewConstMetric(p.data.upstreamDown, + prometheus.CounterValue, float64(zones[pos].Down), name, value.Server) + ch <- prometheus.MustNewConstMetric(p.data.upstreamWeight, + prometheus.CounterValue, zones[pos].Weight, name, value.Server) + ch <- prometheus.MustNewConstMetric(p.data.upstreamResponseMsec, + prometheus.CounterValue, zones[pos].ResponseMsec, name, value.Server) + ch <- prometheus.MustNewConstMetric(p.data.upstreamBackup, + prometheus.CounterValue, float64(zones[pos].Backup), name, value.Server) + ch <- prometheus.MustNewConstMetric(p.data.upstreamFailTimeout, + prometheus.CounterValue, zones[pos].FailTimeout, name, value.Server) + ch <- prometheus.MustNewConstMetric(p.data.upstreamMaxFails, + prometheus.CounterValue, zones[pos].MaxFails, name, value.Server) + ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes, + prometheus.CounterValue, zones[pos].InBytes, name, value.Server, "in") + ch <- prometheus.MustNewConstMetric(p.data.upstreamBytes, + prometheus.CounterValue, zones[pos].OutBytes, name, value.Server, "out") + } + } + + for name, zone := range nginxMetrics.ServerZones { + reflectMetrics(&zone.Responses, p.data.response, ch, name) + reflectMetrics(&zone.Cache, p.data.cache, ch, name) + + ch <- prometheus.MustNewConstMetric(p.data.request, + prometheus.CounterValue, zone.RequestCounter, name) + ch <- prometheus.MustNewConstMetric(p.data.bytes, + prometheus.CounterValue, zone.InBytes, name, "in") + ch <- prometheus.MustNewConstMetric(p.data.bytes, + prometheus.CounterValue, zone.OutBytes, name, "out") + } + + for serverZone, countries := range nginxMetrics.FilterZones { + for country, zone := range countries { + reflectMetrics(&zone.Responses, p.data.filterZoneResponse, ch, serverZone, country) + reflectMetrics(&zone.Cache, p.data.filterZoneCache, ch, serverZone, country) + + ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, + prometheus.CounterValue, float64(zone.InBytes), serverZone, country, "in") + ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, + prometheus.CounterValue, float64(zone.OutBytes), serverZone, country, "out") + } + } +} + +func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) { + val := reflect.ValueOf(value).Elem() + + for i := 0; i < val.NumField(); i++ { + tag := val.Type().Field(i).Tag + l := append(labels, tag.Get("json")) + ch <- prometheus.MustNewConstMetric(desc, + prometheus.CounterValue, float64(val.Field(i).Interface().(float64)), 
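+ // l holds the caller-supplied label values plus this field's json tag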
+ l...) + } +} diff --git a/controllers/nginx/pkg/template/configmap_test.go b/controllers/nginx/pkg/template/configmap_test.go index 2e4c43af2..ff2c60203 100644 --- a/controllers/nginx/pkg/template/configmap_test.go +++ b/controllers/nginx/pkg/template/configmap_test.go @@ -39,12 +39,14 @@ func TestMergeConfigMapToStruct(t *testing.T) { "proxy-send-timeout": "2", "skip-access-log-urls": "/log,/demo,/test", "use-proxy-protocol": "true", + "disable-access-log": "true", "use-gzip": "true", "enable-dynamic-tls-records": "false", "gzip-types": "text/html", } def := config.NewDefault() def.CustomHTTPErrors = []int{300, 400} + def.DisableAccessLog = true def.SkipAccessLogURLs = []string{"/log", "/demo", "/test"} def.ProxyReadTimeout = 1 def.ProxySendTimeout = 2 diff --git a/controllers/nginx/pkg/template/template.go b/controllers/nginx/pkg/template/template.go index cc21dccac..234dd885b 100644 --- a/controllers/nginx/pkg/template/template.go +++ b/controllers/nginx/pkg/template/template.go @@ -78,7 +78,7 @@ func (t *Template) Close() { // Write populates a buffer using a template with NGINX configuration // and the servers and upstreams created by Ingress rules -func (t *Template) Write(conf config.TemplateConfig, isValidTemplate func([]byte) error) ([]byte, error) { +func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) { defer t.tmplBuf.Reset() defer t.outCmdBuf.Reset() @@ -114,13 +114,7 @@ func (t *Template) Write(conf config.TemplateConfig, isValidTemplate func([]byte return t.tmplBuf.Bytes(), nil } - content := t.outCmdBuf.Bytes() - err = isValidTemplate(content) - if err != nil { - return nil, err - } - - return content, nil + return t.outCmdBuf.Bytes(), nil } var ( @@ -132,21 +126,20 @@ var ( } return true }, - "buildLocation": buildLocation, - "buildAuthLocation": buildAuthLocation, - "buildProxyPass": buildProxyPass, - "buildRateLimitZones": buildRateLimitZones, - "buildRateLimit": buildRateLimit, - "buildSSPassthroughUpstreams": buildSSPassthroughUpstreams, - "buildResolvers": buildResolvers, - "isLocationAllowed": isLocationAllowed, - "buildStreamUpstreams": buildStreamUpstreams, - - "contains": strings.Contains, - "hasPrefix": strings.HasPrefix, - "hasSuffix": strings.HasSuffix, - "toUpper": strings.ToUpper, - "toLower": strings.ToLower, + "buildLocation": buildLocation, + "buildAuthLocation": buildAuthLocation, + "buildProxyPass": buildProxyPass, + "buildRateLimitZones": buildRateLimitZones, + "buildRateLimit": buildRateLimit, + "buildSSLPassthroughUpstreams": buildSSLPassthroughUpstreams, + "buildResolvers": buildResolvers, + "isLocationAllowed": isLocationAllowed, + "buildLogFormatUpstream": buildLogFormatUpstream, + "contains": strings.Contains, + "hasPrefix": strings.HasPrefix, + "hasSuffix": strings.HasSuffix, + "toUpper": strings.ToUpper, + "toLower": strings.ToLower, } ) @@ -171,7 +164,7 @@ func buildResolvers(a interface{}) string { return strings.Join(r, " ") } -func buildSSPassthroughUpstreams(b interface{}, sslb interface{}) string { +func buildSSLPassthroughUpstreams(b interface{}, sslb interface{}) string { backends := b.([]*ingress.Backend) sslBackends := sslb.([]*ingress.SSLPassthroughBackend) buf := bytes.NewBuffer(make([]byte, 0, 10)) @@ -199,34 +192,6 @@ func buildSSPassthroughUpstreams(b interface{}, sslb interface{}) string { return buf.String() } -func buildStreamUpstreams(proto string, b interface{}, s interface{}) string { - backends := b.([]*ingress.Backend) - streams := s.([]*ingress.Location) - buf := bytes.NewBuffer(make([]byte, 0, 10)) - // 
multiple services can use the same upstream. - // avoid duplications using a map[name]=true - u := make(map[string]bool) - for _, stream := range streams { - if u[stream.Backend] { - continue - } - u[stream.Backend] = true - fmt.Fprintf(buf, "upstream %v-%v {\n", proto, stream.Backend) - // TODO: find a better way to avoid empty stream upstreams - fmt.Fprintf(buf, "\t\tserver 127.0.0.1:8181 down;\n") - for _, backend := range backends { - if backend.Name == stream.Backend { - for _, server := range backend.Endpoints { - fmt.Fprintf(buf, "\t\tserver %v:%v;\n", server.Address, server.Port) - } - break - } - } - fmt.Fprint(buf, "\t}\n\n") - } - return buf.String() -} - // buildLocation produces the location string, if the ingress has redirects // (specified through the ingress.kubernetes.io/rewrite-to annotation) func buildLocation(input interface{}) string { @@ -237,7 +202,10 @@ func buildLocation(input interface{}) string { path := location.Path if len(location.Redirect.Target) > 0 && location.Redirect.Target != path { - return fmt.Sprintf("~* %s", path) + if path == slash { + return fmt.Sprintf("~* %s", path) + } + return fmt.Sprintf("~* ^%s", path) } return path @@ -259,6 +227,15 @@ func buildAuthLocation(input interface{}) string { return fmt.Sprintf("/_external-auth-%v", str) } +func buildLogFormatUpstream(input interface{}) string { + cfg, ok := input.(config.Configuration) + if !ok { + glog.Errorf("error an ingress.buildLogFormatUpstream type but %T was returned", input) + } + + return cfg.BuildLogFormatUpstream() +} + // buildProxyPass produces the proxy pass string, if the ingress has redirects // (specified through the ingress.kubernetes.io/rewrite-to annotation) // If the annotation ingress.kubernetes.io/add-base-url:"true" is specified it will diff --git a/controllers/nginx/pkg/template/template_test.go b/controllers/nginx/pkg/template/template_test.go index fde02e3ef..20e860c72 100644 --- a/controllers/nginx/pkg/template/template_test.go +++ b/controllers/nginx/pkg/template/template_test.go @@ -45,12 +45,12 @@ var ( rewrite /(.*) /jenkins/$1 break; proxy_pass http://upstream-name; `, false}, - "redirect /something to /": {"/something", "/", "~* /something", ` + "redirect /something to /": {"/something", "/", "~* ^/something", ` rewrite /something/(.*) /$1 break; rewrite /something / break; proxy_pass http://upstream-name; `, false}, - "redirect /something-complex to /not-root": {"/something-complex", "/not-root", "~* /something-complex", ` + "redirect /something-complex to /not-root": {"/something-complex", "/not-root", "~* ^/something-complex", ` rewrite /something-complex/(.*) /not-root/$1 break; proxy_pass http://upstream-name; `, false}, @@ -60,14 +60,14 @@ var ( subs_filter '' '' r; subs_filter '' '' r; `, true}, - "redirect /something to / and rewrite": {"/something", "/", "~* /something", ` + "redirect /something to / and rewrite": {"/something", "/", "~* ^/something", ` rewrite /something/(.*) /$1 break; rewrite /something / break; proxy_pass http://upstream-name; subs_filter '' '' r; subs_filter '' '' r; `, true}, - "redirect /something-complex to /not-root and rewrite": {"/something-complex", "/not-root", "~* /something-complex", ` + "redirect /something-complex to /not-root and rewrite": {"/something-complex", "/not-root", "~* ^/something-complex", ` rewrite /something-complex/(.*) /not-root/$1 break; proxy_pass http://upstream-name; subs_filter '' '' r; @@ -132,7 +132,7 @@ func TestTemplateWithData(t *testing.T) { t.Errorf("invalid NGINX template: %v", err) } - _, 
err = ngxTpl.Write(dat, func(b []byte) error { return nil }) + _, err = ngxTpl.Write(dat) if err != nil { t.Errorf("invalid NGINX template: %v", err) } @@ -166,6 +166,6 @@ func BenchmarkTemplateWithData(b *testing.B) { } for i := 0; i < b.N; i++ { - ngxTpl.Write(dat, func(b []byte) error { return nil }) + ngxTpl.Write(dat) } } diff --git a/controllers/nginx/rootfs/Dockerfile b/controllers/nginx/rootfs/Dockerfile index 74ee4f493..6959d6bbc 100644 --- a/controllers/nginx/rootfs/Dockerfile +++ b/controllers/nginx/rootfs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM gcr.io/google_containers/nginx-slim:0.13 +FROM gcr.io/google_containers/nginx-slim:0.14 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \ diffutils \ diff --git a/controllers/nginx/rootfs/etc/nginx/lua/error_page.lua b/controllers/nginx/rootfs/etc/nginx/lua/error_page.lua index e8a26ee9e..2b9178a56 100644 --- a/controllers/nginx/rootfs/etc/nginx/lua/error_page.lua +++ b/controllers/nginx/rootfs/etc/nginx/lua/error_page.lua @@ -8,17 +8,17 @@ local get_upstreams = upstream.get_upstreams local random = math.random local us = get_upstreams() -function openURL(status) +function openURL(original_headers, status) local httpc = http.new() + original_headers["X-Code"] = status or "404" + original_headers["X-Format"] = original_headers["Accept"] or "text/html" + local random_backend = get_destination() local res, err = httpc:request_uri(random_backend, { path = "/", method = "GET", - headers = { - ["X-Code"] = status or "404", - ["X-Format"] = ngx.var.httpAccept or "html", - } + headers = original_headers, }) if not res then @@ -26,8 +26,8 @@ function openURL(status) ngx.exit(500) end - if ngx.var.http_cookie then - ngx.header["Cookie"] = ngx.var.http_cookie + for k,v in pairs(res.headers) do + ngx.header[k] = v end ngx.status = tonumber(status) diff --git a/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl b/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl index 2c4d3cc2e..07a7e7921 100644 --- a/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl +++ b/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl @@ -1,4 +1,8 @@ -{{ $cfg := .Cfg }}{{ $healthzURI := .HealthzURI }}{{ $backends := .Backends }} +{{ $cfg := .Cfg }} +{{ $healthzURI := .HealthzURI }} +{{ $backends := .Backends }} +{{ $proxyHeaders := .ProxySetHeaders }} +{{ $passthroughBackends := .PassthroughBackends }} daemon off; worker_processes {{ $cfg.WorkerProcesses }}; @@ -10,7 +14,7 @@ worker_rlimit_nofile {{ .MaxOpenFiles }}; events { multi_accept on; worker_connections {{ $cfg.MaxWorkerConnections }}; - use epoll; + use epoll; } http { @@ -22,7 +26,7 @@ http { real_ip_header X-Forwarded-For; set_real_ip_from 0.0.0.0/0; {{ end }} - + real_ip_recursive on; {{/* databases used to determine the country depending on the client IP address */}} @@ -47,7 +51,7 @@ http { aio threads; tcp_nopush on; tcp_nodelay on; - + log_subrequest on; reset_timedout_connection on; @@ -56,6 +60,9 @@ http { client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }}; large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }}; + + http2_max_field_size {{ $cfg.HTTP2MaxFieldSize }}; + http2_max_header_size {{ $cfg.HTTP2MaxHeaderSize }}; types_hash_max_size 2048; server_names_hash_max_size {{ $cfg.ServerNameHashMaxSize }}; @@ -69,25 +76,27 @@ http { gzip_comp_level 5; gzip_http_version 1.1; gzip_min_length 256; - gzip_types {{ $cfg.GzipTypes }}; + gzip_types {{ 
$cfg.GzipTypes }}; gzip_proxied any; {{ end }} - server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }}; + server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }}; - log_format upstreaminfo '{{ if $cfg.UseProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - ' - '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' - '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status'; + log_format upstreaminfo '{{ buildLogFormatUpstream $cfg }}'; {{/* map urls that should not appear in access.log */}} {{/* http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log */}} - map $request $loggable { + map $request_uri $loggable { {{ range $reqUri := $cfg.SkipAccessLogURLs }} {{ $reqUri }} 0;{{ end }} default 1; } + {{ if $cfg.DisableAccessLog }} + access_log off; + {{ else }} access_log /var/log/nginx/access.log upstreaminfo if=$loggable; + {{ end }} error_log /var/log/nginx/error.log {{ $cfg.ErrorLogLevel }}; {{ buildResolvers $cfg.Resolver }} @@ -181,8 +190,8 @@ http { {{range $name, $upstream := $backends}} upstream {{$upstream.Name}} { - {{ if $cfg.EnableStickySessions }} - sticky hash=sha1 httponly; + {{ if eq $upstream.SessionAffinity.AffinityType "cookie" }} + sticky hash={{$upstream.SessionAffinity.CookieSessionAffinity.Hash}} name={{$upstream.SessionAffinity.CookieSessionAffinity.Name}} httponly; {{ else }} least_conn; {{ end }} @@ -201,9 +210,10 @@ http { {{ range $index, $server := .Servers }} server { server_name {{ $server.Hostname }}; - listen [::]:80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $index 0 }} ipv6only=off{{end}}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}}; + listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}}; {{/* Listen on 442 because port 443 is used in the stream section */}} - {{ if not (empty $server.SSLCertificate) }}listen 442 {{ if $cfg.UseProxyProtocol }}proxy_protocol{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }}; + {{/* This listen on port 442 cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}} + {{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}{{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if $cfg.UseProxyProtocol }} proxy_protocol {{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }}; {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}} # PEM sha: {{ $server.SSLPemChecksum }} ssl_certificate {{ $server.SSLCertificate }}; @@ -220,29 +230,34 @@ http { {{ $path := buildLocation $location }} {{ $authPath := buildAuthLocation $location }} - {{ if not (empty $location.CertificateAuth.CertFileName) }} - # PEM sha: {{ $location.CertificateAuth.PemSHA }} - ssl_client_certificate {{ $location.CertificateAuth.CAFileName }}; + {{ if not (empty $location.CertificateAuth.AuthSSLCert.CAFileName) }} + # PEM sha: {{ 
$location.CertificateAuth.AuthSSLCert.PemSHA }} + ssl_client_certificate {{ $location.CertificateAuth.AuthSSLCert.CAFileName }}; ssl_verify_client on; + ssl_verify_depth {{ $location.CertificateAuth.ValidationDepth }}; {{ end }} {{ if not (empty $authPath) }} location = {{ $authPath }} { internal; + set $proxy_upstream_name "internal"; + {{ if not $location.ExternalAuth.SendBody }} proxy_pass_request_body off; proxy_set_header Content-Length ""; {{ end }} - {{ if not (empty $location.ExternalAuth.Method) }} + {{ if not (empty $location.ExternalAuth.Method) }} proxy_method {{ $location.ExternalAuth.Method }}; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Scheme $pass_access_scheme; {{ end }} - proxy_set_header Host $host; + proxy_set_header Host $host; proxy_pass_request_headers on; set $target {{ $location.ExternalAuth.URL }}; proxy_pass $target; } {{ end }} - + location {{ $path }} { set $proxy_upstream_name "{{ $location.Backend }}"; @@ -252,17 +267,21 @@ http { allow {{ $ip }};{{ end }} deny all; {{ end }} - + port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; {{ if not (empty $authPath) }} # this location requires authentication auth_request {{ $authPath }}; {{ end }} - - {{ if (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect) }} + + {{ if not (empty $location.ExternalAuth.SigninURL) }} + error_page 401 = {{ $location.ExternalAuth.SigninURL }}; + {{ end }} + + {{ if (or $location.Redirect.ForceSSLRedirect (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect)) }} # enforce ssl on server side - if ($scheme = http) { + if ($pass_access_scheme = http) { return 301 https://$host$request_uri; } {{ end }} @@ -270,7 +289,7 @@ http { {{ $limits := buildRateLimit $location }} {{ range $limit := $limits }} {{ $limit }}{{ end }} - + {{ if $location.BasicDigestAuth.Secured }} {{ if eq $location.BasicDigestAuth.Type "basic" }} auth_basic "{{ $location.BasicDigestAuth.Realm }}"; @@ -281,7 +300,7 @@ http { {{ end }} proxy_set_header Authorization ""; {{ end }} - + {{ if $location.EnableCORS }} {{ template "CORS" }} {{ end }} @@ -290,6 +309,11 @@ http { proxy_set_header Host $host; + # Pass the extracted client certificate to the backend + {{ if not (empty $location.CertificateAuth.AuthSSLCert.CAFileName) }} + proxy_set_header ssl-client-cert $ssl_client_cert; + {{ end }} + # Pass Real IP proxy_set_header X-Real-IP $remote_addr; @@ -301,11 +325,18 @@ http { proxy_set_header X-Forwarded-Host $host; proxy_set_header X-Forwarded-Port $pass_port; proxy_set_header X-Forwarded-Proto $pass_access_scheme; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Scheme $pass_access_scheme; # mitigate HTTPoxy Vulnerability # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/ proxy_set_header Proxy ""; + # Custom headers + {{ range $k, $v := $proxyHeaders }} + proxy_set_header {{ $k }} "{{ $v }}"; + {{ end }} + proxy_connect_timeout {{ $location.Proxy.ConnectTimeout }}s; proxy_send_timeout {{ $location.Proxy.SendTimeout }}s; proxy_read_timeout {{ $location.Proxy.ReadTimeout }}s; @@ -313,14 +344,21 @@ http { proxy_redirect off; proxy_buffering off; proxy_buffer_size "{{ $location.Proxy.BufferSize }}"; + proxy_buffers 4 "{{ $location.Proxy.BufferSize }}"; proxy_http_version 1.1; + proxy_cookie_domain {{ $location.Proxy.CookieDomain }}; + proxy_cookie_path {{ $location.Proxy.CookiePath }}; + {{/* rewrite only works if the content is not compressed */}} {{ if 
$location.Redirect.AddBaseURL }} proxy_set_header Accept-Encoding ""; {{ end }} + {{/* Add any additional configuration defined */}} + {{ $location.ConfigurationSnippet }} + {{ buildProxyPass $backends $location }} {{ else }} #{{ $location.Denied }} @@ -328,7 +366,7 @@ http { {{ end }} } {{ end }} - + {{ if eq $server.Hostname "_" }} # health checks in cloud providers require the use of port 80 location {{ $healthzURI }} { @@ -340,7 +378,7 @@ http { # with an external software (like sysdig) location /nginx_status { allow 127.0.0.1; - allow ::1; + {{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }} deny all; access_log off; @@ -350,22 +388,24 @@ http { {{ template "CUSTOM_ERRORS" $cfg }} } - + {{ end }} - + # default server, used for NGINX healthcheck and access to nginx stats server { # Use the port 18080 (random value just to avoid known ports) as default port for nginx. # Changing this value requires a change in: # https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/nginx/command.go#L104 - listen [::]:18080 ipv6only=off default_server reuseport backlog={{ .BacklogSize }}; + listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}18080 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} default_server reuseport backlog={{ .BacklogSize }}; location {{ $healthzURI }} { access_log off; return 200; } - + location /nginx_status { + set $proxy_upstream_name "internal"; + {{ if $cfg.EnableVtsStatus }} vhost_traffic_status_display; vhost_traffic_status_display_format html; @@ -379,8 +419,10 @@ http { # using prometheus. # TODO: enable extraction for vts module. location /internal_nginx_status { + set $proxy_upstream_name "internal"; + allow 127.0.0.1; - allow ::1; + {{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }} deny all; access_log off; @@ -402,7 +444,7 @@ http { location / { {{ if .CustomErrors }} content_by_lua_block { - openURL(503) + openURL(ngx.req.get_headers(0), 503) } {{ else }} return 503; @@ -412,18 +454,24 @@ http { } stream { + {{ if gt (len $passthroughBackends) 0 }} # map FQDN that requires SSL passthrough map $ssl_preread_server_name $stream_upstream { {{ range $i, $passthrough := .PassthroughBackends }} {{ $passthrough.Hostname }} {{ $passthrough.Backend }}; {{ end }} - # send SSL traffic to this nginx in a different port + # send SSL traffic to this nginx in a different port default nginx-ssl-backend; } - log_format log_stream '$remote_addr [$time_local] $protocol [$ssl_preread_server_name] [$stream_upstream] $status $bytes_sent $bytes_received $session_time'; + log_format log_stream {{ $cfg.LogFormatStream }}; + {{ if $cfg.DisableAccessLog }} + access_log off; + {{ else }} access_log /var/log/nginx/access.log log_stream; + {{ end }} + error_log /var/log/nginx/error.log; # configure default backend for SSL @@ -431,34 +479,42 @@ stream { server 127.0.0.1:442; } - {{ buildSSPassthroughUpstreams $backends .PassthroughBackends }} + {{ buildSSLPassthroughUpstreams $backends .PassthroughBackends }} server { - listen [::]:443 ipv6only=off; - {{ if $cfg.UseProxyProtocol }}proxy_protocol on;{{ end }} + listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{ end }}{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}; proxy_pass $stream_upstream; ssl_preread on; } - - {{ buildStreamUpstreams "tcp" $backends .TCPBackends }} - - {{ buildStreamUpstreams "udp" $backends .UDPBackends }} - - # TCP services - {{ range $i, $tcpServer := .TCPBackends }} - server { - listen {{ $tcpServer.Path }}; - proxy_pass tcp-{{ $tcpServer.Backend }}; - } 
{{ end }} - # UDP services - {{ range $i, $udpServer := .UDPBackends }} - server { - listen {{ $udpServer.Path }} udp; - proxy_responses 1; - proxy_pass udp-{{ $udpServer.Backend }}; - } + # TCP services + {{ range $i, $tcpServer := .TCPBackends }} + upstream {{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} { + {{ range $j, $endpoint := $tcpServer.Endpoints }} + server {{ $endpoint.Address }}:{{ $endpoint.Port }}; + {{ end }} + } + + server { + listen {{ $tcpServer.Port }}; + proxy_pass {{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }}; + } + {{ end }} + + # UDP services + {{ range $i, $udpServer := .UDPBackends }} + upstream {{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }} { + {{ range $j, $endpoint := $udpServer.Endpoints }} + server {{ $endpoint.Address }}:{{ $endpoint.Port }}; + {{ end }} + } + + server { + listen {{ $udpServer.Port }}; + proxy_responses 1; + proxy_pass {{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }}; + } {{ end }} } @@ -468,9 +524,9 @@ stream { location @custom_{{ $errCode }} { internal; content_by_lua_block { - openURL({{ $errCode }}) + openURL(ngx.req.get_headers(0), {{ $errCode }}) } - } + } {{ end }} {{ end }} diff --git a/controllers/nginx/test/data/config.json b/controllers/nginx/test/data/config.json index 4aaa2dd11..087a8fe83 100644 --- a/controllers/nginx/test/data/config.json +++ b/controllers/nginx/test/data/config.json @@ -57134,57 +57134,7 @@ }] }], "sslDHParam": "", - "tcpBackends": [{ - "path": "2222", - "isDefBackend": false, - "backend": "default-echoheaders-2222", - "basicDigestAuth": { - "type": "", - "realm": "", - "file": "", - "secured": false - }, - "externalAuth": { - "url": "", - "method": "", - "sendBody": false - }, - "rateLimit": { - "connections": { - "name": "", - "limit": 0, - "burst": 0, - "sharedSize": 0 - }, - "rps": { - "name": "", - "limit": 0, - "burst": 0, - "sharedSize": 0 - } - }, - "redirect": { - "target": "", - "addBaseUrl": false, - "sslRedirect": false - }, - "whitelist": { - "cidr": null - }, - "proxy": { - "conectTimeout": 0, - "sendTimeout": 0, - "readTimeout": 0, - "bufferSize": "" - }, - "certificateAuth": { - "secret": "", - "certFilename": "", - "keyFilename": "", - "caFilename": "", - "pemSha": "" - } - }], + "tcpBackends": [], "udpBackends": [], "backends": [{ "name": "default-echoheaders-80", diff --git a/core/pkg/ingress/annotations/auth/main.go b/core/pkg/ingress/annotations/auth/main.go index 2921a550c..fe2167f03 100644 --- a/core/pkg/ingress/annotations/auth/main.go +++ b/core/pkg/ingress/annotations/auth/main.go @@ -20,6 +20,7 @@ import ( "fmt" "io/ioutil" "os" + "path" "regexp" "github.com/pkg/errors" @@ -59,8 +60,17 @@ type auth struct { // NewParser creates a new authentication annotation parser func NewParser(authDirectory string, sr resolver.Secret) parser.IngressAnnotation { - // TODO: check permissions required - os.MkdirAll(authDirectory, 0655) + os.MkdirAll(authDirectory, 0755) + + currPath := authDirectory + for currPath != "/" { + currPath = path.Dir(currPath) + err := os.Chmod(currPath, 0755) + if err != nil { + break + } + } + return auth{sr, authDirectory} } diff --git a/core/pkg/ingress/annotations/authreq/main.go b/core/pkg/ingress/annotations/authreq/main.go index 560a73868..91c56b9f6 100644 --- a/core/pkg/ingress/annotations/authreq/main.go +++ b/core/pkg/ingress/annotations/authreq/main.go @@ -28,16 +28,18 @@ 
import ( const ( // external URL that provides the authentication - authURL = "ingress.kubernetes.io/auth-url" - authMethod = "ingress.kubernetes.io/auth-method" - authBody = "ingress.kubernetes.io/auth-send-body" + authURL = "ingress.kubernetes.io/auth-url" + authSigninURL = "ingress.kubernetes.io/auth-signin" + authMethod = "ingress.kubernetes.io/auth-method" + authBody = "ingress.kubernetes.io/auth-send-body" ) // External returns external authentication configuration for an Ingress rule type External struct { - URL string `json:"url"` - Method string `json:"method"` - SendBody bool `json:"sendBody"` + URL string `json:"url"` + SigninURL string `json:"signinUrl"` + Method string `json:"method"` + SendBody bool `json:"sendBody"` } var ( @@ -77,6 +79,8 @@ func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) { return nil, ing_errors.NewLocationDenied("an empty string is not a valid URL") } + signin, _ := parser.GetStringAnnotation(authSigninURL, ing) + ur, err := url.Parse(str) if err != nil { return nil, err @@ -100,8 +104,9 @@ func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) { sb, _ := parser.GetBoolAnnotation(authBody, ing) return &External{ - URL: str, - Method: m, - SendBody: sb, + URL: str, + SigninURL: signin, + Method: m, + SendBody: sb, }, nil } diff --git a/core/pkg/ingress/annotations/authreq/main_test.go b/core/pkg/ingress/annotations/authreq/main_test.go index 696d8bdc0..75cd6d2b7 100644 --- a/core/pkg/ingress/annotations/authreq/main_test.go +++ b/core/pkg/ingress/annotations/authreq/main_test.go @@ -67,23 +67,25 @@ func TestAnnotations(t *testing.T) { ing.SetAnnotations(data) tests := []struct { - title string - url string - method string - sendBody bool - expErr bool + title string + url string + signinURL string + method string + sendBody bool + expErr bool }{ - {"empty", "", "", false, true}, - {"no scheme", "bar", "", false, true}, - {"invalid host", "http://", "", false, true}, - {"invalid host (multiple dots)", "http://foo..bar.com", "", false, true}, - {"valid URL", "http://bar.foo.com/external-auth", "", false, false}, - {"valid URL - send body", "http://foo.com/external-auth", "POST", true, false}, - {"valid URL - send body", "http://foo.com/external-auth", "GET", true, false}, + {"empty", "", "", "", false, true}, + {"no scheme", "bar", "bar", "", false, true}, + {"invalid host", "http://", "http://", "", false, true}, + {"invalid host (multiple dots)", "http://foo..bar.com", "http://foo..bar.com", "", false, true}, + {"valid URL", "http://bar.foo.com/external-auth", "http://bar.foo.com/external-auth", "", false, false}, + {"valid URL - send body", "http://foo.com/external-auth", "http://foo.com/external-auth", "POST", true, false}, + {"valid URL - send body", "http://foo.com/external-auth", "http://foo.com/external-auth", "GET", true, false}, } for _, test := range tests { data[authURL] = test.url + data[authSigninURL] = test.signinURL data[authBody] = fmt.Sprintf("%v", test.sendBody) data[authMethod] = fmt.Sprintf("%v", test.method) @@ -101,6 +103,9 @@ func TestAnnotations(t *testing.T) { if u.URL != test.url { t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.url, u.URL) } + if u.SigninURL != test.signinURL { + t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.signinURL, u.SigninURL) + } if u.Method != test.method { t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.method, u.Method) } diff --git a/core/pkg/ingress/annotations/authtls/main.go 
b/core/pkg/ingress/annotations/authtls/main.go
index 143353249..c4172e51c 100644
--- a/core/pkg/ingress/annotations/authtls/main.go
+++ b/core/pkg/ingress/annotations/authtls/main.go
@@ -28,11 +28,16 @@ import (
 
 const (
 	// name of the secret
-	authTLSSecret = "ingress.kubernetes.io/auth-tls-secret"
+	annotationAuthTLSSecret = "ingress.kubernetes.io/auth-tls-secret"
+	annotationAuthTLSDepth  = "ingress.kubernetes.io/auth-tls-verify-depth"
+	defaultAuthTLSDepth     = 1
 )
 
-type authTLS struct {
-	certResolver resolver.AuthCertificate
+// AuthSSLConfig contains the AuthSSLCert used for mutual authentication
+// and the configured ValidationDepth
+type AuthSSLConfig struct {
+	AuthSSLCert     resolver.AuthSSLCert
+	ValidationDepth int `json:"validationDepth"`
 }
 
 // NewParser creates a new TLS authentication annotation parser
@@ -40,29 +45,42 @@ func NewParser(resolver resolver.AuthCertificate) parser.IngressAnnotation {
 	return authTLS{resolver}
 }
 
-// ParseAnnotations parses the annotations contained in the ingress
-// rule used to use an external URL as source for authentication
+type authTLS struct {
+	certResolver resolver.AuthCertificate
+}
+
+// Parse parses the annotations contained in the ingress
+// rule used to configure a Certificate as the authentication method
 func (a authTLS) Parse(ing *extensions.Ingress) (interface{}, error) {
-	str, err := parser.GetStringAnnotation(authTLSSecret, ing)
+
+	tlsauthsecret, err := parser.GetStringAnnotation(annotationAuthTLSSecret, ing)
 	if err != nil {
-		return nil, err
+		return &AuthSSLConfig{}, err
 	}
 
-	if str == "" {
-		return nil, ing_errors.NewLocationDenied("an empty string is not a valid secret name")
+	if tlsauthsecret == "" {
+		return &AuthSSLConfig{}, ing_errors.NewLocationDenied("an empty string is not a valid secret name")
 	}
 
-	_, _, err = k8s.ParseNameNS(str)
+	_, _, err = k8s.ParseNameNS(tlsauthsecret)
 	if err != nil {
-		return nil, ing_errors.NewLocationDenied("an empty string is not a valid secret name")
+		return &AuthSSLConfig{}, ing_errors.NewLocationDenied("an empty string is not a valid secret name")
 	}
 
-	authCert, err := a.certResolver.GetAuthCertificate(str)
+	tlsdepth, err := parser.GetIntAnnotation(annotationAuthTLSDepth, ing)
+	if err != nil || tlsdepth == 0 {
+		tlsdepth = defaultAuthTLSDepth
+	}
+
+	authCert, err := a.certResolver.GetAuthCertificate(tlsauthsecret)
 	if err != nil {
-		return nil, ing_errors.LocationDenied{
+		return &AuthSSLConfig{}, ing_errors.LocationDenied{
 			Reason: errors.Wrap(err, "error obtaining certificate"),
 		}
 	}
 
-	return authCert, nil
+	return &AuthSSLConfig{
+		AuthSSLCert:     *authCert,
+		ValidationDepth: tlsdepth,
+	}, nil
 }
diff --git a/core/pkg/ingress/annotations/class/main.go b/core/pkg/ingress/annotations/class/main.go
new file mode 100644
index 000000000..d9e862938
--- /dev/null
+++ b/core/pkg/ingress/annotations/class/main.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package class
+
+import (
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+
+	"k8s.io/ingress/core/pkg/ingress/annotations/parser"
+	"k8s.io/ingress/core/pkg/ingress/errors"
+)
+
+const (
+	// IngressKey picks a specific "class" for the Ingress.
+	// The controller only processes Ingresses with this annotation either
+	// unset, or set to either the configured value or the empty string.
+	IngressKey = "kubernetes.io/ingress.class"
+)
+
+// IsValid returns true if the given Ingress either doesn't specify
+// the ingress.class annotation, or it's set to the class configured in the
+// ingress controller.
+func IsValid(ing *extensions.Ingress, controller, defClass string) bool {
+	ingress, err := parser.GetStringAnnotation(IngressKey, ing)
+	if err != nil && !errors.IsMissingAnnotations(err) {
+		glog.Warningf("unexpected error reading ingress annotation: %v", err)
+	}
+
+	// we have 2 valid combinations
+	// 1 - ingress with default class | blank annotation on ingress
+	// 2 - ingress with specific class | same annotation on ingress
+	//
+	// and 2 invalid combinations
+	// 3 - ingress with default class | fixed annotation on ingress
+	// 4 - ingress with specific class | different annotation on ingress
+	if ingress == "" && controller == defClass {
+		return true
+	}
+
+	return ingress == controller
+}
diff --git a/core/pkg/ingress/annotations/class/main_test.go b/core/pkg/ingress/annotations/class/main_test.go
new file mode 100644
index 000000000..bf204052f
--- /dev/null
+++ b/core/pkg/ingress/annotations/class/main_test.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package class + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestIsValidClass(t *testing.T) { + tests := []struct { + ingress string + controller string + defClass string + isValid bool + }{ + {"", "", "nginx", true}, + {"", "nginx", "nginx", true}, + {"nginx", "nginx", "nginx", true}, + {"custom", "custom", "nginx", true}, + {"", "killer", "nginx", false}, + {"", "", "nginx", true}, + {"custom", "nginx", "nginx", false}, + } + + ing := &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + } + + data := map[string]string{} + ing.SetAnnotations(data) + for _, test := range tests { + ing.Annotations[IngressKey] = test.ingress + b := IsValid(ing, test.controller, test.defClass) + if b != test.isValid { + t.Errorf("test %v - expected %v but %v was returned", test, test.isValid, b) + } + } +} diff --git a/core/pkg/ingress/annotations/proxy/main.go b/core/pkg/ingress/annotations/proxy/main.go index c1ee7e47b..ab7d2e07a 100644 --- a/core/pkg/ingress/annotations/proxy/main.go +++ b/core/pkg/ingress/annotations/proxy/main.go @@ -24,11 +24,13 @@ import ( ) const ( - bodySize = "ingress.kubernetes.io/proxy-body-size" - connect = "ingress.kubernetes.io/proxy-connect-timeout" - send = "ingress.kubernetes.io/proxy-send-timeout" - read = "ingress.kubernetes.io/proxy-read-timeout" - bufferSize = "ingress.kubernetes.io/proxy-buffer-size" + bodySize = "ingress.kubernetes.io/proxy-body-size" + connect = "ingress.kubernetes.io/proxy-connect-timeout" + send = "ingress.kubernetes.io/proxy-send-timeout" + read = "ingress.kubernetes.io/proxy-read-timeout" + bufferSize = "ingress.kubernetes.io/proxy-buffer-size" + cookiePath = "ingress.kubernetes.io/proxy-cookie-path" + cookieDomain = "ingress.kubernetes.io/proxy-cookie-domain" ) // Configuration returns the proxy timeout to use in the upstream server/s @@ -38,6 +40,8 @@ type Configuration struct { SendTimeout int `json:"sendTimeout"` ReadTimeout int `json:"readTimeout"` BufferSize string `json:"bufferSize"` + CookieDomain string `json:"cookieDomain"` + CookiePath string `json:"cookiePath"` } type proxy struct { @@ -73,10 +77,20 @@ func (a proxy) Parse(ing *extensions.Ingress) (interface{}, error) { bufs = defBackend.ProxyBufferSize } + cp, err := parser.GetStringAnnotation(cookiePath, ing) + if err != nil || cp == "" { + cp = defBackend.ProxyCookiePath + } + + cd, err := parser.GetStringAnnotation(cookieDomain, ing) + if err != nil || cd == "" { + cd = defBackend.ProxyCookieDomain + } + bs, err := parser.GetStringAnnotation(bodySize, ing) if err != nil || bs == "" { bs = defBackend.ProxyBodySize } - return &Configuration{bs, ct, st, rt, bufs}, nil + return &Configuration{bs, ct, st, rt, bufs, cd, cp}, nil } diff --git a/core/pkg/ingress/annotations/ratelimit/main.go b/core/pkg/ingress/annotations/ratelimit/main.go index f3ca328b7..31850a90b 100644 --- a/core/pkg/ingress/annotations/ratelimit/main.go +++ b/core/pkg/ingress/annotations/ratelimit/main.go @@ -36,10 +36,10 @@ const ( defSharedSize = 5 ) -// RateLimit returns rate limit configuration for an Ingress rule -// Is possible to limit the number of connections per IP address or -// connections per second. -// Note: Is possible to specify both limits +// RateLimit returns rate limit configuration for an Ingress rule limiting the +// number of connections per IP address and/or connections per second. 
+// If both annotations are specified in a single Ingress rule, the RPS limit
+// takes precedence
 type RateLimit struct {
 	// Connections indicates a limit with the number of connections per IP address
 	Connections Zone `json:"connections"`
diff --git a/core/pkg/ingress/annotations/rewrite/main.go b/core/pkg/ingress/annotations/rewrite/main.go
index c7e57c253..999ef7844 100644
--- a/core/pkg/ingress/annotations/rewrite/main.go
+++ b/core/pkg/ingress/annotations/rewrite/main.go
@@ -24,9 +24,10 @@ import (
 )
 
 const (
-	rewriteTo   = "ingress.kubernetes.io/rewrite-target"
-	addBaseURL  = "ingress.kubernetes.io/add-base-url"
-	sslRedirect = "ingress.kubernetes.io/ssl-redirect"
+	rewriteTo        = "ingress.kubernetes.io/rewrite-target"
+	addBaseURL       = "ingress.kubernetes.io/add-base-url"
+	sslRedirect      = "ingress.kubernetes.io/ssl-redirect"
+	forceSSLRedirect = "ingress.kubernetes.io/force-ssl-redirect"
 )
 
 // Redirect describes the per location redirect config
@@ -38,6 +39,8 @@ type Redirect struct {
 	AddBaseURL bool `json:"addBaseUrl"`
 	// SSLRedirect indicates if the location section is accessible SSL only
 	SSLRedirect bool `json:"sslRedirect"`
+	// ForceSSLRedirect indicates if the SSL redirect should be enforced for
+	// the location section regardless of the default backend setting
+	ForceSSLRedirect bool `json:"forceSSLRedirect"`
 }
 
 type rewrite struct {
@@ -52,19 +55,20 @@ func NewParser(br resolver.DefaultBackend) parser.IngressAnnotation {
 // ParseAnnotations parses the annotations contained in the ingress
 // rule used to rewrite the defined paths
 func (a rewrite) Parse(ing *extensions.Ingress) (interface{}, error) {
-	rt, err := parser.GetStringAnnotation(rewriteTo, ing)
-	if err != nil {
-		return nil, err
-	}
-
+	rt, _ := parser.GetStringAnnotation(rewriteTo, ing)
 	sslRe, err := parser.GetBoolAnnotation(sslRedirect, ing)
 	if err != nil {
 		sslRe = a.backendResolver.GetDefaultBackend().SSLRedirect
 	}
+	fSslRe, err := parser.GetBoolAnnotation(forceSSLRedirect, ing)
+	if err != nil {
+		fSslRe = a.backendResolver.GetDefaultBackend().ForceSSLRedirect
+	}
 	abu, _ := parser.GetBoolAnnotation(addBaseURL, ing)
 	return &Redirect{
-		Target:      rt,
-		AddBaseURL:  abu,
-		SSLRedirect: sslRe,
+		Target:           rt,
+		AddBaseURL:       abu,
+		SSLRedirect:      sslRe,
+		ForceSSLRedirect: fSslRe,
 	}, nil
 }
diff --git a/core/pkg/ingress/annotations/rewrite/main_test.go b/core/pkg/ingress/annotations/rewrite/main_test.go
index 56ba6a9b7..75daf01bc 100644
--- a/core/pkg/ingress/annotations/rewrite/main_test.go
+++ b/core/pkg/ingress/annotations/rewrite/main_test.go
@@ -76,8 +76,8 @@ func (m mockBackend) GetDefaultBackend() defaults.Backend {
 func TestWithoutAnnotations(t *testing.T) {
 	ing := buildIngress()
 	_, err := NewParser(mockBackend{}).Parse(ing)
-	if err == nil {
-		t.Error("Expected error with ingress without annotations")
+	if err != nil {
+		t.Errorf("unexpected error with ingress without annotations: %v", err)
 	}
 }
 
@@ -117,10 +117,6 @@ func TestSSLRedirect(t *testing.T) {
 		t.Errorf("Expected true but returned false")
 	}
 
-	if !redirect.SSLRedirect {
-		t.Errorf("Expected true but returned false")
-	}
-
 	data[sslRedirect] = "false"
 	ing.SetAnnotations(data)
 
@@ -133,3 +129,32 @@ func TestSSLRedirect(t *testing.T) {
 		t.Errorf("Expected false but returned true")
 	}
 }
+
+func TestForceSSLRedirect(t *testing.T) {
+	ing := buildIngress()
+
+	data := map[string]string{}
+	data[rewriteTo] = defRoute
+	ing.SetAnnotations(data)
+
+	i, _ := NewParser(mockBackend{true}).Parse(ing)
+	redirect, ok := i.(*Redirect)
+	if !ok {
+		t.Errorf("expected a Redirect type")
+	}
+	if redirect.ForceSSLRedirect {
+		t.Errorf("Expected false but
returned true") + } + + data[forceSSLRedirect] = "true" + ing.SetAnnotations(data) + + i, _ = NewParser(mockBackend{false}).Parse(ing) + redirect, ok = i.(*Redirect) + if !ok { + t.Errorf("expected a Redirect type") + } + if !redirect.ForceSSLRedirect { + t.Errorf("Expected true but returned false") + } +} diff --git a/core/pkg/ingress/annotations/sessionaffinity/main.go b/core/pkg/ingress/annotations/sessionaffinity/main.go new file mode 100644 index 000000000..d506e844e --- /dev/null +++ b/core/pkg/ingress/annotations/sessionaffinity/main.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sessionaffinity + +import ( + "regexp" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/apis/extensions" + + "k8s.io/ingress/core/pkg/ingress/annotations/parser" +) + +const ( + annotationAffinityType = "ingress.kubernetes.io/affinity" + // If a cookie with this name exists, + // its value is used as an index into the list of available backends. + annotationAffinityCookieName = "ingress.kubernetes.io/session-cookie-name" + defaultAffinityCookieName = "INGRESSCOOKIE" + // This is the algorithm used by nginx to generate a value for the session cookie, if + // one isn't supplied and affinity is set to "cookie". + annotationAffinityCookieHash = "ingress.kubernetes.io/session-cookie-hash" + defaultAffinityCookieHash = "md5" +) + +var ( + affinityCookieHashRegex = regexp.MustCompile(`^(index|md5|sha1)$`) +) + +// AffinityConfig describes the per ingress session affinity config +type AffinityConfig struct { + // The type of affinity that will be used + AffinityType string `json:"type"` + CookieConfig +} + +// CookieConfig describes the Config of cookie type affinity +type CookieConfig struct { + // The name of the cookie that will be used in case of cookie affinity type. + Name string `json:"name"` + // The hash that will be used to encode the cookie in case of cookie affinity type + Hash string `json:"hash"` +} + +// CookieAffinityParse gets the annotation values related to Cookie Affinity +// It also sets default values when no value or incorrect value is found +func CookieAffinityParse(ing *extensions.Ingress) *CookieConfig { + + sn, err := parser.GetStringAnnotation(annotationAffinityCookieName, ing) + + if err != nil || sn == "" { + glog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName) + sn = defaultAffinityCookieName + } + + sh, err := parser.GetStringAnnotation(annotationAffinityCookieHash, ing) + + if err != nil || !affinityCookieHashRegex.MatchString(sh) { + glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. 
Setting it to default %v", ing.Name, annotationAffinityCookieHash, defaultAffinityCookieHash) + sh = defaultAffinityCookieHash + } + + return &CookieConfig{ + Name: sn, + Hash: sh, + } +} + +// NewParser creates a new Affinity annotation parser +func NewParser() parser.IngressAnnotation { + return affinity{} +} + +type affinity struct { +} + +// ParseAnnotations parses the annotations contained in the ingress +// rule used to configure the affinity directives +func (a affinity) Parse(ing *extensions.Ingress) (interface{}, error) { + + var cookieAffinityConfig *CookieConfig + cookieAffinityConfig = &CookieConfig{} + + // Check the type of affinity that will be used + at, err := parser.GetStringAnnotation(annotationAffinityType, ing) + if err != nil { + at = "" + } + + switch at { + case "cookie": + cookieAffinityConfig = CookieAffinityParse(ing) + + default: + glog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name) + + } + return &AffinityConfig{ + AffinityType: at, + CookieConfig: *cookieAffinityConfig, + }, nil + +} diff --git a/core/pkg/ingress/annotations/sessionaffinity/main_test.go b/core/pkg/ingress/annotations/sessionaffinity/main_test.go new file mode 100644 index 000000000..3a3a17200 --- /dev/null +++ b/core/pkg/ingress/annotations/sessionaffinity/main_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package sessionaffinity
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/util/intstr"
+)
+
+func buildIngress() *extensions.Ingress {
+	defaultBackend := extensions.IngressBackend{
+		ServiceName: "default-backend",
+		ServicePort: intstr.FromInt(80),
+	}
+
+	return &extensions.Ingress{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			Namespace: api.NamespaceDefault,
+		},
+		Spec: extensions.IngressSpec{
+			Backend: &extensions.IngressBackend{
+				ServiceName: "default-backend",
+				ServicePort: intstr.FromInt(80),
+			},
+			Rules: []extensions.IngressRule{
+				{
+					Host: "foo.bar.com",
+					IngressRuleValue: extensions.IngressRuleValue{
+						HTTP: &extensions.HTTPIngressRuleValue{
+							Paths: []extensions.HTTPIngressPath{
+								{
+									Path:    "/foo",
+									Backend: defaultBackend,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func TestIngressAffinityCookieConfig(t *testing.T) {
+	ing := buildIngress()
+
+	data := map[string]string{}
+	data[annotationAffinityType] = "cookie"
+	data[annotationAffinityCookieHash] = "sha123"
+	data[annotationAffinityCookieName] = "INGRESSCOOKIE"
+	ing.SetAnnotations(data)
+
+	affin, _ := NewParser().Parse(ing)
+	nginxAffinity, ok := affin.(*AffinityConfig)
+	if !ok {
+		t.Errorf("expected an AffinityConfig type")
+	}
+
+	if nginxAffinity.AffinityType != "cookie" {
+		t.Errorf("expected cookie as sticky-type but returned %v", nginxAffinity.AffinityType)
+	}
+
+	if nginxAffinity.CookieConfig.Hash != "md5" {
+		t.Errorf("expected md5 as sticky-hash but returned %v", nginxAffinity.CookieConfig.Hash)
+	}
+
+	if nginxAffinity.CookieConfig.Name != "INGRESSCOOKIE" {
+		t.Errorf("expected INGRESSCOOKIE as sticky-name but returned %v", nginxAffinity.CookieConfig.Name)
+	}
+}
diff --git a/core/pkg/ingress/annotations/snippet/main.go b/core/pkg/ingress/annotations/snippet/main.go
new file mode 100644
index 000000000..d88cfbc32
--- /dev/null
+++ b/core/pkg/ingress/annotations/snippet/main.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snippet
+
+import (
+	"k8s.io/kubernetes/pkg/apis/extensions"
+
+	"k8s.io/ingress/core/pkg/ingress/annotations/parser"
+)
+
+const (
+	annotation = "ingress.kubernetes.io/configuration-snippet"
+)
+
+type snippet struct {
+}
+
+// NewParser creates a new configuration snippet annotation parser
+func NewParser() parser.IngressAnnotation {
+	return snippet{}
+}
+
+// Parse parses the annotations contained in the ingress rule
+// used to indicate if the location/s contains a fragment of
+// configuration to be included inside the paths of the rules
+func (a snippet) Parse(ing *extensions.Ingress) (interface{}, error) {
+	return parser.GetStringAnnotation(annotation, ing)
+}
diff --git a/core/pkg/ingress/annotations/snippet/main_test.go b/core/pkg/ingress/annotations/snippet/main_test.go
new file mode 100644
index 000000000..269996a95
--- /dev/null
+++ b/core/pkg/ingress/annotations/snippet/main_test.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package snippet + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestParse(t *testing.T) { + ap := NewParser() + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected string + }{ + {map[string]string{annotation: "more_headers"}, "more_headers"}, + {map[string]string{annotation: "false"}, "false"}, + {map[string]string{}, ""}, + {nil, ""}, + } + + ing := &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + if result != testCase.expected { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/core/pkg/ingress/controller/annotations.go b/core/pkg/ingress/controller/annotations.go index c1eb09fbd..5a8beeadf 100644 --- a/core/pkg/ingress/controller/annotations.go +++ b/core/pkg/ingress/controller/annotations.go @@ -17,8 +17,11 @@ limitations under the License. package controller import ( + "fmt" + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/ingress/core/pkg/ingress/annotations/auth" @@ -33,6 +36,8 @@ import ( "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" "k8s.io/ingress/core/pkg/ingress/annotations/secureupstream" + "k8s.io/ingress/core/pkg/ingress/annotations/sessionaffinity" + "k8s.io/ingress/core/pkg/ingress/annotations/snippet" "k8s.io/ingress/core/pkg/ingress/annotations/sslpassthrough" "k8s.io/ingress/core/pkg/ingress/errors" "k8s.io/ingress/core/pkg/ingress/resolver" @@ -45,24 +50,28 @@ type extractorConfig interface { } type annotationExtractor struct { - annotations map[string]parser.IngressAnnotation + secretResolver resolver.Secret + annotations map[string]parser.IngressAnnotation } func newAnnotationExtractor(cfg extractorConfig) annotationExtractor { return annotationExtractor{ + cfg, map[string]parser.IngressAnnotation{ - "BasicDigestAuth": auth.NewParser(auth.AuthDirectory, cfg), - "ExternalAuth": authreq.NewParser(), - "CertificateAuth": authtls.NewParser(cfg), - "EnableCORS": cors.NewParser(), - "HealthCheck": healthcheck.NewParser(cfg), - "Whitelist": ipwhitelist.NewParser(cfg), - "UsePortInRedirects": portinredirect.NewParser(cfg), - "Proxy": proxy.NewParser(cfg), - "RateLimit": ratelimit.NewParser(), - "Redirect": rewrite.NewParser(cfg), - "SecureUpstream": secureupstream.NewParser(), - "SSLPassthrough": sslpassthrough.NewParser(), + "BasicDigestAuth": auth.NewParser(auth.AuthDirectory, cfg), + "ExternalAuth": authreq.NewParser(), + "CertificateAuth": authtls.NewParser(cfg), + "EnableCORS": cors.NewParser(), + "HealthCheck": healthcheck.NewParser(cfg), + "Whitelist": 
ipwhitelist.NewParser(cfg),
+			"UsePortInRedirects":   portinredirect.NewParser(cfg),
+			"Proxy":                proxy.NewParser(cfg),
+			"RateLimit":            ratelimit.NewParser(),
+			"Redirect":             rewrite.NewParser(cfg),
+			"SecureUpstream":       secureupstream.NewParser(),
+			"SessionAffinity":      sessionaffinity.NewParser(),
+			"SSLPassthrough":       sslpassthrough.NewParser(),
+			"ConfigurationSnippet": snippet.NewParser(),
 		},
 	}
 }
@@ -96,9 +105,11 @@ func (e *annotationExtractor) Extract(ing *extensions.Ingress) map[string]interface{} {
 }
 
 const (
-	secureUpstream = "SecureUpstream"
-	healthCheck    = "HealthCheck"
-	sslPassthrough = "SSLPassthrough"
+	secureUpstream  = "SecureUpstream"
+	healthCheck     = "HealthCheck"
+	sslPassthrough  = "SSLPassthrough"
+	sessionAffinity = "SessionAffinity"
+	certificateAuth = "CertificateAuth"
 )
 
 func (e *annotationExtractor) SecureUpstream(ing *extensions.Ingress) bool {
@@ -115,3 +126,22 @@ func (e *annotationExtractor) SSLPassthrough(ing *extensions.Ingress) bool {
 	val, _ := e.annotations[sslPassthrough].Parse(ing)
 	return val.(bool)
 }
+
+func (e *annotationExtractor) SessionAffinity(ing *extensions.Ingress) *sessionaffinity.AffinityConfig {
+	val, _ := e.annotations[sessionAffinity].Parse(ing)
+	return val.(*sessionaffinity.AffinityConfig)
+}
+
+func (e *annotationExtractor) ContainsCertificateAuth(ing *extensions.Ingress) bool {
+	val, _ := parser.GetStringAnnotation("ingress.kubernetes.io/auth-tls-secret", ing)
+	return val != ""
+}
+
+func (e *annotationExtractor) CertificateAuthSecret(ing *extensions.Ingress) (*api.Secret, error) {
+	val, _ := parser.GetStringAnnotation("ingress.kubernetes.io/auth-tls-secret", ing)
+	if val == "" {
+		return nil, fmt.Errorf("ingress rule %v/%v does not contain the auth-tls-secret annotation", ing.Namespace, ing.Name)
+	}
+
+	return e.secretResolver.GetSecret(val)
+}
diff --git a/core/pkg/ingress/controller/annotations_test.go b/core/pkg/ingress/controller/annotations_test.go
index f577920db..1b3b9b08d 100644
--- a/core/pkg/ingress/controller/annotations_test.go
+++ b/core/pkg/ingress/controller/annotations_test.go
@@ -28,10 +28,13 @@ import (
 )
 
 const (
-	annotation_secureUpstream = "ingress.kubernetes.io/secure-backends"
-	annotation_upsMaxFails    = "ingress.kubernetes.io/upstream-max-fails"
-	annotation_upsFailTimeout = "ingress.kubernetes.io/upstream-fail-timeout"
-	annotation_passthrough    = "ingress.kubernetes.io/ssl-passthrough"
+	annotationSecureUpstream     = "ingress.kubernetes.io/secure-backends"
+	annotationUpsMaxFails        = "ingress.kubernetes.io/upstream-max-fails"
+	annotationUpsFailTimeout     = "ingress.kubernetes.io/upstream-fail-timeout"
+	annotationPassthrough        = "ingress.kubernetes.io/ssl-passthrough"
+	annotationAffinityType       = "ingress.kubernetes.io/affinity"
+	annotationAffinityCookieName = "ingress.kubernetes.io/session-cookie-name"
+	annotationAffinityCookieHash = "ingress.kubernetes.io/session-cookie-hash"
 )
 
 type mockCfg struct {
@@ -106,9 +109,9 @@ func TestSecureUpstream(t *testing.T) {
 		annotations map[string]string
 		er          bool
 	}{
-		{map[string]string{annotation_secureUpstream: "true"}, true},
-		{map[string]string{annotation_secureUpstream: "false"}, false},
-		{map[string]string{annotation_secureUpstream + "_no": "true"}, false},
+		{map[string]string{annotationSecureUpstream: "true"}, true},
+		{map[string]string{annotationSecureUpstream: "false"}, false},
+		{map[string]string{annotationSecureUpstream + "_no": "true"}, false},
 		{map[string]string{}, false},
 		{nil, false},
 	}
@@ -131,9 +134,9 @@ func TestHealthCheck(t *testing.T) {
 		eumf        int
 		euft        int
 	}{
-
{map[string]string{annotation_upsMaxFails: "3", annotation_upsFailTimeout: "10"}, 3, 10}, - {map[string]string{annotation_upsMaxFails: "3"}, 3, 0}, - {map[string]string{annotation_upsFailTimeout: "10"}, 0, 10}, + {map[string]string{annotationUpsMaxFails: "3", annotationUpsFailTimeout: "10"}, 3, 10}, + {map[string]string{annotationUpsMaxFails: "3"}, 3, 0}, + {map[string]string{annotationUpsFailTimeout: "10"}, 0, 10}, {map[string]string{}, 0, 0}, {nil, 0, 0}, } @@ -164,9 +167,9 @@ func TestSSLPassthrough(t *testing.T) { annotations map[string]string er bool }{ - {map[string]string{annotation_passthrough: "true"}, true}, - {map[string]string{annotation_passthrough: "false"}, false}, - {map[string]string{annotation_passthrough + "_no": "true"}, false}, + {map[string]string{annotationPassthrough: "true"}, true}, + {map[string]string{annotationPassthrough: "false"}, false}, + {map[string]string{annotationPassthrough + "_no": "true"}, false}, {map[string]string{}, false}, {nil, false}, } @@ -179,3 +182,39 @@ func TestSSLPassthrough(t *testing.T) { } } } + +func TestAffinitySession(t *testing.T) { + ec := newAnnotationExtractor(mockCfg{}) + ing := buildIngress() + + fooAnns := []struct { + annotations map[string]string + affinitytype string + hash string + name string + }{ + {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "md5", annotationAffinityCookieName: "route"}, "cookie", "md5", "route"}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "xpto", annotationAffinityCookieName: "route1"}, "cookie", "md5", "route1"}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieHash: "", annotationAffinityCookieName: ""}, "cookie", "md5", "INGRESSCOOKIE"}, + {map[string]string{}, "", "", ""}, + {nil, "", "", ""}, + } + + for _, foo := range fooAnns { + ing.SetAnnotations(foo.annotations) + r := ec.SessionAffinity(ing) + t.Logf("Testing pass %v %v %v", foo.affinitytype, foo.hash, foo.name) + if r == nil { + t.Errorf("Returned nil but expected a SessionAffinity.AffinityConfig") + continue + } + + if r.CookieConfig.Hash != foo.hash { + t.Errorf("Returned %v but expected %v for Hash", r.CookieConfig.Hash, foo.hash) + } + + if r.CookieConfig.Name != foo.name { + t.Errorf("Returned %v but expected %v for Name", r.CookieConfig.Name, foo.name) + } + } +} diff --git a/core/pkg/ingress/controller/backend_ssl.go b/core/pkg/ingress/controller/backend_ssl.go index 92c32fc8c..e75d8ff0e 100644 --- a/core/pkg/ingress/controller/backend_ssl.go +++ b/core/pkg/ingress/controller/backend_ssl.go @@ -18,6 +18,7 @@ package controller import ( "fmt" + "reflect" "strings" "time" @@ -43,28 +44,9 @@ func (ic *GenericController) syncSecret(k interface{}) error { return fmt.Errorf("deferring sync till endpoints controller has synced") } - // check if the default certificate is configured - key := fmt.Sprintf("default/%v", defServerName) - _, exists := ic.sslCertTracker.Get(key) + var key string var cert *ingress.SSLCert var err error - if !exists { - if ic.cfg.DefaultSSLCertificate != "" { - cert, err = ic.getPemCertificate(ic.cfg.DefaultSSLCertificate) - if err != nil { - return err - } - } else { - defCert, defKey := ssl.GetFakeSSLCert() - cert, err = ssl.AddOrUpdateCertAndKey("system-snake-oil-certificate", defCert, defKey, []byte{}) - if err != nil { - return nil - } - } - cert.Name = defServerName - cert.Namespace = api.NamespaceDefault - ic.sslCertTracker.Add(key, cert) - } key = k.(string) @@ -87,40 +69,52 @@ func (ic 
*GenericController) syncSecret(k interface{}) error {
 	}
 
 	// create certificates and add or update the item in the store
-	_, exists = ic.sslCertTracker.Get(key)
+	cur, exists := ic.sslCertTracker.Get(key)
 	if exists {
-		glog.V(3).Infof("updating secret %v/%v in the store ", sec.Namespace, sec.Name)
+		s := cur.(*ingress.SSLCert)
+		if reflect.DeepEqual(s, cert) {
+			// no need to update
+			return nil
+		}
+		glog.V(3).Infof("updating secret %v/%v in the store", sec.Namespace, sec.Name)
 		ic.sslCertTracker.Update(key, cert)
 		return nil
 	}
 
-	glog.V(3).Infof("adding secret %v/%v to the store ", sec.Namespace, sec.Name)
+	glog.V(3).Infof("adding secret %v/%v to the store", sec.Namespace, sec.Name)
 	ic.sslCertTracker.Add(key, cert)
 	return nil
 }
 
+// getPemCertificate receives a secret name and returns an ingress.SSLCert.
+// It parses the secret and verifies if it's a keypair, or a 'ca.crt' secret only.
 func (ic *GenericController) getPemCertificate(secretName string) (*ingress.SSLCert, error) {
 	secretInterface, exists, err := ic.secrLister.Store.GetByKey(secretName)
 	if err != nil {
-		return nil, fmt.Errorf("error retriveing secret %v: %v", secretName, err)
+		return nil, fmt.Errorf("error retrieving secret %v: %v", secretName, err)
 	}
 	if !exists {
-		return nil, fmt.Errorf("secret named %v does not exists", secretName)
+		return nil, fmt.Errorf("secret named %v does not exist", secretName)
 	}
 
 	secret := secretInterface.(*api.Secret)
-	cert, ok := secret.Data[api.TLSCertKey]
-	if !ok {
-		return nil, fmt.Errorf("secret named %v has no private key", secretName)
-	}
-	key, ok := secret.Data[api.TLSPrivateKeyKey]
-	if !ok {
-		return nil, fmt.Errorf("secret named %v has no cert", secretName)
-	}
+	cert, okcert := secret.Data[api.TLSCertKey]
+	key, okkey := secret.Data[api.TLSPrivateKeyKey]
 
 	ca := secret.Data["ca.crt"]
 
 	nsSecName := strings.Replace(secretName, "/", "-", -1)
-	s, err := ssl.AddOrUpdateCertAndKey(nsSecName, cert, key, ca)
+
+	var s *ingress.SSLCert
+	if okcert && okkey {
+		glog.V(3).Infof("found certificate and private key, configuring %v as a TLS Secret", secretName)
+		s, err = ssl.AddOrUpdateCertAndKey(nsSecName, cert, key, ca)
+	} else if ca != nil {
+		glog.V(3).Infof("found only ca.crt, configuring %v as a Certificate Authentication secret", secretName)
+		s, err = ssl.AddCertAuth(nsSecName, ca)
+	} else {
+		return nil, fmt.Errorf("no keypair or CA cert could be found in %v", secretName)
+	}
+
 	if err != nil {
 		return nil, err
 	}
@@ -134,10 +128,14 @@ func (ic *GenericController) getPemCertificate(secretName string) (*ingress.SSLCert, error) {
 func (ic *GenericController) secrReferenced(name, namespace string) bool {
 	for _, ingIf := range ic.ingLister.Store.List() {
 		ing := ingIf.(*extensions.Ingress)
-		str, err := parser.GetStringAnnotation("ingress.kubernetes.io/auth-tls-secret", ing)
-		if err == nil && str == fmt.Sprintf("%v/%v", namespace, name) {
-			return true
+
+		if ic.annotations.ContainsCertificateAuth(ing) {
+			str, _ := parser.GetStringAnnotation("ingress.kubernetes.io/auth-tls-secret", ing)
+			if str == fmt.Sprintf("%v/%v", namespace, name) {
+				return true
+			}
 		}
+
 		if ing.Namespace != namespace {
 			continue
 		}
diff --git a/core/pkg/ingress/controller/controller.go b/core/pkg/ingress/controller/controller.go
index 146a00aa3..bbb96df36 100644
--- a/core/pkg/ingress/controller/controller.go
+++ b/core/pkg/ingress/controller/controller.go
@@ -18,6 +18,7 @@ package controller
 
 import (
 	"fmt"
+	"os"
 	"reflect"
 	"sort"
 	"strconv"
@@ -40,6 +41,7 @@ import (
 
 	cache_store "k8s.io/ingress/core/pkg/cache"
"k8s.io/ingress/core/pkg/ingress" + "k8s.io/ingress/core/pkg/ingress/annotations/class" "k8s.io/ingress/core/pkg/ingress/annotations/healthcheck" "k8s.io/ingress/core/pkg/ingress/annotations/proxy" "k8s.io/ingress/core/pkg/ingress/annotations/service" @@ -57,11 +59,6 @@ const ( defServerName = "_" podStoreSyncedPollPeriod = 1 * time.Second rootLocation = "/" - - // ingressClassKey picks a specific "class" for the Ingress. The controller - // only processes Ingresses with this annotation either unset, or set - // to either the configured value or the empty string. - ingressClassKey = "kubernetes.io/ingress.class" ) var ( @@ -76,11 +73,13 @@ type GenericController struct { ingController *cache.Controller endpController *cache.Controller svcController *cache.Controller + nodeController *cache.Controller secrController *cache.Controller mapController *cache.Controller ingLister cache_store.StoreToIngressLister svcLister cache.StoreToServiceLister + nodeLister cache.StoreToNodeLister endpLister cache.StoreToEndpointsLister secrLister cache_store.StoreToSecretsLister mapLister cache_store.StoreToConfigmapLister @@ -125,6 +124,7 @@ type Configuration struct { UDPConfigMapName string DefaultSSLCertificate string DefaultHealthzURL string + DefaultIngressClass string // optional PublishService string // Backend is the particular implementation to be used. @@ -132,6 +132,7 @@ type Configuration struct { Backend ingress.Controller UpdateStatus bool + ElectionID string } // newIngressController creates an Ingress controller @@ -163,17 +164,23 @@ func newIngressController(config *Configuration) *GenericController { ingEventHandler := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { addIng := obj.(*extensions.Ingress) - if !IsValidClass(addIng, config.IngressClass) { - glog.Infof("ignoring add for ingress %v based on annotation %v", addIng.Name, ingressClassKey) + if !class.IsValid(addIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { + glog.Infof("ignoring add for ingress %v based on annotation %v", addIng.Name, class.IngressKey) return } ic.recorder.Eventf(addIng, api.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", addIng.Namespace, addIng.Name)) ic.syncQueue.Enqueue(obj) + if ic.annotations.ContainsCertificateAuth(addIng) { + s, err := ic.annotations.CertificateAuthSecret(addIng) + if err == nil { + ic.syncSecret(fmt.Sprintf("%v/%v", s.Namespace, s.Name)) + } + } }, DeleteFunc: func(obj interface{}) { delIng := obj.(*extensions.Ingress) - if !IsValidClass(delIng, config.IngressClass) { - glog.Infof("ignoring add for ingress %v based on annotation %v", delIng.Name, ingressClassKey) + if !class.IsValid(delIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { + glog.Infof("ignoring delete for ingress %v based on annotation %v", delIng.Name, class.IngressKey) return } ic.recorder.Eventf(delIng, api.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", delIng.Namespace, delIng.Name)) @@ -182,7 +189,8 @@ func newIngressController(config *Configuration) *GenericController { UpdateFunc: func(old, cur interface{}) { oldIng := old.(*extensions.Ingress) curIng := cur.(*extensions.Ingress) - if !IsValidClass(curIng, config.IngressClass) { + if !class.IsValid(curIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) && + !class.IsValid(oldIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { return } @@ -207,6 +215,13 @@ func newIngressController(config *Configuration) *GenericController { }() } } + if ic.annotations.ContainsCertificateAuth(upIng) { + s, err := 
ic.annotations.CertificateAuthSecret(upIng) + if err == nil { + ic.syncSecret(fmt.Sprintf("%v/%v", s.Namespace, s.Name)) + } + } + ic.syncQueue.Enqueue(cur) } }, @@ -278,11 +293,11 @@ func newIngressController(config *Configuration) *GenericController { &api.Endpoints{}, ic.cfg.ResyncPeriod, eventHandler) ic.secrLister.Store, ic.secrController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "secrets", ic.cfg.Namespace, fields.Everything()), + cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "secrets", api.NamespaceAll, fields.Everything()), &api.Secret{}, ic.cfg.ResyncPeriod, secrEventHandler) ic.mapLister.Store, ic.mapController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "configmaps", ic.cfg.Namespace, fields.Everything()), + cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "configmaps", api.NamespaceAll, fields.Everything()), &api.ConfigMap{}, ic.cfg.ResyncPeriod, mapEventHandler) ic.svcLister.Indexer, ic.svcController = cache.NewIndexerInformer( @@ -292,11 +307,18 @@ func newIngressController(config *Configuration) *GenericController { cache.ResourceEventHandlerFuncs{}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + ic.nodeLister.Store, ic.nodeController = cache.NewInformer( + cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()), + &api.Node{}, ic.cfg.ResyncPeriod, eventHandler) + if config.UpdateStatus { ic.syncStatus = status.NewStatusSyncer(status.Config{ - Client: config.Client, - PublishService: ic.cfg.PublishService, - IngressLister: ic.ingLister, + Client: config.Client, + PublishService: ic.cfg.PublishService, + IngressLister: ic.ingLister, + ElectionID: config.ElectionID, + IngressClass: config.IngressClass, + DefaultIngressClass: config.DefaultIngressClass, }) } else { glog.Warning("Update of ingress status is disabled (flag --update-status=false was specified)") @@ -304,6 +326,15 @@ func newIngressController(config *Configuration) *GenericController { ic.annotations = newAnnotationExtractor(ic) + ic.cfg.Backend.SetListers(ingress.StoreLister{ + Ingress: ic.ingLister, + Service: ic.svcLister, + Node: ic.nodeLister, + Endpoint: ic.endpLister, + Secret: ic.secrLister, + ConfigMap: ic.mapLister, + }) + return &ic } @@ -330,7 +361,7 @@ func (ic GenericController) GetDefaultBackend() defaults.Backend { return ic.cfg.Backend.BackendDefaults() } -// GetSecret searchs for a secret in the local secrets Store +// GetSecret searches for a secret in the local secrets Store func (ic GenericController) GetSecret(name string) (*api.Secret, error) { s, exists, err := ic.secrLister.Store.GetByKey(name) if err != nil { @@ -390,8 +421,8 @@ func (ic *GenericController) sync(key interface{}) error { data, err := ic.cfg.Backend.OnUpdate(ingress.Configuration{ Backends: upstreams, Servers: servers, - TCPEndpoints: ic.getTCPServices(), - UPDEndpoints: ic.getUDPServices(), + TCPEndpoints: ic.getStreamServices(ic.cfg.TCPConfigMapName, api.ProtocolTCP), + UDPEndpoints: ic.getStreamServices(ic.cfg.UDPConfigMapName, api.ProtocolUDP), PassthroughBackends: passUpstreams, }) if err != nil { @@ -411,54 +442,32 @@ func (ic *GenericController) sync(key interface{}) error { return nil } -func (ic *GenericController) getTCPServices() []*ingress.Location { - if ic.cfg.TCPConfigMapName == "" { - // no configmap for TCP services - return []*ingress.Location{} +func (ic *GenericController) getStreamServices(configmapName 
string, proto api.Protocol) []ingress.L4Service {
+	glog.V(3).Infof("obtaining information about stream services of type %v located in configmap %v", proto, configmapName)
+	if configmapName == "" {
+		// no configmap configured
+		return []ingress.L4Service{}
 	}
 
-	ns, name, err := k8s.ParseNameNS(ic.cfg.TCPConfigMapName)
+	ns, name, err := k8s.ParseNameNS(configmapName)
 	if err != nil {
-		glog.Warningf("%v", err)
-		return []*ingress.Location{}
+		glog.Errorf("unexpected error reading configmap %v: %v", name, err)
+		return []ingress.L4Service{}
 	}
-	tcpMap, err := ic.getConfigMap(ns, name)
+
+	configmap, err := ic.getConfigMap(ns, name)
 	if err != nil {
-		glog.V(5).Infof("no configured tcp services found: %v", err)
-		return []*ingress.Location{}
+		glog.Errorf("unexpected error reading configmap %v: %v", name, err)
+		return []ingress.L4Service{}
 	}
 
-	return ic.getStreamServices(tcpMap.Data, api.ProtocolTCP)
-}
-
-func (ic *GenericController) getUDPServices() []*ingress.Location {
-	if ic.cfg.UDPConfigMapName == "" {
-		// no configmap for TCP services
-		return []*ingress.Location{}
-	}
-
-	ns, name, err := k8s.ParseNameNS(ic.cfg.UDPConfigMapName)
-	if err != nil {
-		glog.Warningf("%v", err)
-		return []*ingress.Location{}
-	}
-	tcpMap, err := ic.getConfigMap(ns, name)
-	if err != nil {
-		glog.V(3).Infof("no configured tcp services found: %v", err)
-		return []*ingress.Location{}
-	}
-
-	return ic.getStreamServices(tcpMap.Data, api.ProtocolUDP)
-}
-
-func (ic *GenericController) getStreamServices(data map[string]string, proto api.Protocol) []*ingress.Location {
-	var svcs []*ingress.Location
+	var svcs []ingress.L4Service
 	// k -> port to expose
 	// v -> <namespace>/<service name>:<port>
-	for k, v := range data {
-		_, err := strconv.Atoi(k)
+	for k, v := range configmap.Data {
+		externalPort, err := strconv.Atoi(k)
 		if err != nil {
-			glog.Warningf("%v is not valid as a TCP port", k)
+			glog.Warningf("%v is not valid as a TCP/UDP port", k)
 			continue
 		}
@@ -499,6 +508,7 @@ func (ic *GenericController) getStreamServices(data map[string]string, proto api.Protocol) []*ingress.Location {
 		var endps []ingress.Endpoint
 		targetPort, err := strconv.Atoi(svcPort)
 		if err != nil {
+			glog.V(3).Infof("searching service %v/%v endpoints using the name '%v'", svcNs, svcName, svcPort)
 			for _, sp := range svc.Spec.Ports {
 				if sp.Name == svcPort {
 					endps = ic.getEndpoints(svc, sp.TargetPort, proto, &healthcheck.Upstream{})
@@ -507,6 +517,7 @@ func (ic *GenericController) getStreamServices(data map[string]string, proto api.Protocol) []*ingress.Location {
 			}
 		} else {
 			// we need to use the TargetPort (where the endpoints are running)
+			glog.V(3).Infof("searching service %v/%v endpoints using the target port '%v'", svcNs, svcName, targetPort)
 			for _, sp := range svc.Spec.Ports {
 				if sp.Port == int32(targetPort) {
 					endps = ic.getEndpoints(svc, sp.TargetPort, proto, &healthcheck.Upstream{})
@@ -515,18 +526,22 @@ func (ic *GenericController) getStreamServices(data map[string]string, proto api.Protocol) []*ingress.Location {
 			}
 		}
 
-		sort.Sort(ingress.EndpointByAddrPort(endps))
-
-		// tcp upstreams cannot contain empty upstreams and there is no
-		// default backend equivalent for TCP
+		// stream services cannot contain empty upstreams and there is no
+		// default backend equivalent
 		if len(endps) == 0 {
-			glog.Warningf("service %v/%v does not have any active endpoints", svcNs, svcName)
+			glog.Warningf("service %v/%v does not have any active endpoints for port %v and protocol %v", svcNs, svcName, svcPort, proto)
 			continue
 		}
 
-		svcs = append(svcs, &ingress.Location{
-			Path:    k,
-			Backend: fmt.Sprintf("%v-%v-%v", svcNs, svcName, svcPort),
+		svcs = append(svcs, ingress.L4Service{
+ Port: externalPort, + Backend: ingress.L4Backend{ + Name: svcName, + Namespace: svcNs, + Port: intstr.FromString(svcPort), + Protocol: proto, + }, + Endpoints: endps, }) } @@ -549,7 +564,7 @@ func (ic *GenericController) getDefaultUpstream() *ingress.Backend { } if !svcExists { - glog.Warningf("service %v does not exists", svcKey) + glog.Warningf("service %v does not exist", svcKey) upstream.Endpoints = append(upstream.Endpoints, newDefaultServer()) return upstream } @@ -587,6 +602,10 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress for _, ingIf := range ings { ing := ingIf.(*extensions.Ingress) + if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { + continue + } + anns := ic.annotations.Extract(ing) for _, rule := range ing.Spec.Rules { @@ -599,31 +618,9 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress server = servers[defServerName] } - // use default upstream - defBackend := upstreams[defUpstreamName] - // we need to check if the spec contains the default backend - if ing.Spec.Backend != nil { - glog.V(3).Infof("ingress rule %v/%v defines a default Backend %v/%v", - ing.Namespace, - ing.Name, - ing.Spec.Backend.ServiceName, - ing.Spec.Backend.ServicePort.String()) - - name := fmt.Sprintf("%v-%v-%v", - ing.GetNamespace(), - ing.Spec.Backend.ServiceName, - ing.Spec.Backend.ServicePort.String()) - - if defUps, ok := upstreams[name]; ok { - defBackend = defUps - } - } - if rule.HTTP == nil && - len(ing.Spec.TLS) == 0 && host != defServerName { - glog.V(3).Infof("ingress rule %v/%v does not contains HTTP or TLS rules. using default backend", ing.Namespace, ing.Name) - server.Locations[0].Backend = defBackend.Name + glog.V(3).Infof("ingress rule %v/%v does not contains HTTP rules. using default backend", ing.Namespace, ing.Name) continue } @@ -683,7 +680,6 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress glog.V(3).Infof("upstream %v does not have any active endpoints. Using default backend", value.Name) value.Endpoints = append(value.Endpoints, newDefaultServer()) } - sort.Sort(ingress.EndpointByAddrPort(value.Endpoints)) aUpstreams = append(aUpstreams, value) } sort.Sort(ingress.BackendByNameServers(aUpstreams)) @@ -700,16 +696,23 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress // GetAuthCertificate ... 
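To make the new stream-service shape concrete: a TCP/UDP configmap entry such as `"2222": "default/echoheaders:2222"` now maps to one `ingress.L4Service` value instead of a synthetic `ingress.Location` path. A trimmed-down sketch of that mapping, using simplified stand-in types (the real ones use `intstr.IntOrString` and `api.Protocol`):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Simplified stand-ins for the real ingress.L4Service / ingress.L4Backend.
type l4Backend struct {
	Name, Namespace, Port, Protocol string
}

type l4Service struct {
	Port    int
	Backend l4Backend
}

// parseEntry converts one configmap pair of the form
// "<port>" -> "<namespace>/<service name>:<port>" into an L4 service.
func parseEntry(key, value, proto string) (l4Service, error) {
	externalPort, err := strconv.Atoi(key)
	if err != nil {
		return l4Service{}, fmt.Errorf("%v is not valid as a TCP/UDP port", key)
	}
	parts := strings.FieldsFunc(value, func(r rune) bool { return r == '/' || r == ':' })
	if len(parts) != 3 {
		return l4Service{}, fmt.Errorf("invalid backend %q", value)
	}
	return l4Service{
		Port: externalPort,
		Backend: l4Backend{
			Namespace: parts[0],
			Name:      parts[1],
			Port:      parts[2],
			Protocol:  proto,
		},
	}, nil
}

func main() {
	svc, err := parseEntry("2222", "default/echoheaders:2222", "TCP")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", svc)
}
```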
func (ic GenericController) GetAuthCertificate(secretName string) (*resolver.AuthSSLCert, error) {
+	key, err := ic.GetSecret(secretName)
+	if err != nil {
+		return &resolver.AuthSSLCert{}, fmt.Errorf("unexpected error: %v", err)
+	}
+	if key != nil {
+		ic.secretQueue.Enqueue(key)
+	}
+
 	bc, exists := ic.sslCertTracker.Get(secretName)
 	if !exists {
-		return &resolver.AuthSSLCert{}, fmt.Errorf("secret %v does not exists", secretName)
+		return &resolver.AuthSSLCert{}, fmt.Errorf("secret %v does not exist", secretName)
 	}
 	cert := bc.(*ingress.SSLCert)
 	return &resolver.AuthSSLCert{
-		Secret:       secretName,
-		CertFileName: cert.PemFileName,
-		CAFileName:   cert.CAFileName,
-		PemSHA:       cert.PemSHA,
+		Secret:     secretName,
+		CAFileName: cert.CAFileName,
+		PemSHA:     cert.PemSHA,
 	}, nil
 }
 
@@ -722,8 +725,13 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ingress.Backend {
 	for _, ingIf := range data {
 		ing := ingIf.(*extensions.Ingress)
 
+		if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
+			continue
+		}
+
 		secUpstream := ic.annotations.SecureUpstream(ing)
 		hz := ic.annotations.HealthCheck(ing)
+		affinity := ic.annotations.SessionAffinity(ing)
 
 		var defBackend string
 		if ing.Spec.Backend != nil {
@@ -763,6 +771,14 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ingress.Backend {
 				if !upstreams[name].Secure {
 					upstreams[name].Secure = secUpstream
 				}
+				if upstreams[name].SessionAffinity.AffinityType == "" {
+					upstreams[name].SessionAffinity.AffinityType = affinity.AffinityType
+					if affinity.AffinityType == "cookie" {
+						upstreams[name].SessionAffinity.CookieSessionAffinity.Name = affinity.CookieConfig.Name
+						upstreams[name].SessionAffinity.CookieSessionAffinity.Hash = affinity.CookieConfig.Hash
+					}
+				}
+
 				svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
 				endp, err := ic.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), hz)
 				if err != nil {
@@ -789,7 +805,7 @@ func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
 	}
 
 	if !svcExists {
-		err = fmt.Errorf("service %v does not exists", svcKey)
+		err = fmt.Errorf("service %v does not exist", svcKey)
 		return upstreams, err
 	}
 
@@ -806,6 +822,7 @@ func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
 			glog.Warningf("service %v does not have any active endpoints", svcKey)
 		}
 
+		sort.Sort(ingress.EndpointByAddrPort(endps))
 		upstreams = append(upstreams, endps...)
 		break
 	}
@@ -814,7 +831,12 @@ func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
 	return upstreams, nil
 }
 
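Before the `createServers` rewrite below, it is worth spelling out the session-affinity wiring added to `createUpstreams` above: the first Ingress that sets an affinity type on a backend wins, and later Ingresses cannot overwrite it. A reduced sketch of that flow with hypothetical local types (not the real `ingress.Backend`):

```go
package main

import "fmt"

type cookieSessionAffinity struct{ Name, Hash string }

type sessionAffinityConfig struct {
	AffinityType          string
	CookieSessionAffinity cookieSessionAffinity
}

type backend struct {
	Name            string
	SessionAffinity sessionAffinityConfig
}

// applyAffinity mirrors the createUpstreams logic: a backend keeps the
// affinity settings of the first ingress that configured them.
func applyAffinity(b *backend, affinityType, cookieName, cookieHash string) {
	if b.SessionAffinity.AffinityType != "" {
		return
	}
	b.SessionAffinity.AffinityType = affinityType
	if affinityType == "cookie" {
		b.SessionAffinity.CookieSessionAffinity.Name = cookieName
		b.SessionAffinity.CookieSessionAffinity.Hash = cookieHash
	}
}

func main() {
	b := &backend{Name: "default-echoheaders-80"}
	applyAffinity(b, "cookie", "INGRESSCOOKIE", "md5")
	applyAffinity(b, "cookie", "OTHER", "sha1") // ignored: already configured
	fmt.Printf("%+v\n", *b)
}
```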
-func (ic *GenericController) createServers(data []interface{}, upstreams map[string]*ingress.Backend) map[string]*ingress.Server {
+// createServers initializes a map that contains information about the list of
+// FQDN referenced by ingress rules and the common name field in the referenced
+// SSL certificates. Each server is configured with location / using a default
+// backend specified by the user or the one inside the ingress spec.
+func (ic *GenericController) createServers(data []interface{},
+	upstreams map[string]*ingress.Backend) map[string]*ingress.Server {
 	servers := make(map[string]*ingress.Server)
 
 	bdef := ic.GetDefaultBackend()
@@ -824,30 +846,41 @@ func (ic *GenericController) createServers(data []interface{}, upstreams map[string]*ingress.Backend) map[string]*ingress.Server {
 		SendTimeout:  bdef.ProxySendTimeout,
 		ReadTimeout:  bdef.ProxyReadTimeout,
 		BufferSize:   bdef.ProxyBufferSize,
+		CookieDomain: bdef.ProxyCookieDomain,
+		CookiePath:   bdef.ProxyCookiePath,
 	}
 
-	dun := ic.getDefaultUpstream().Name
-
-	// This adds the Default Certificate to Default Backend and also for vhosts missing the secret
+	// This adds the Default Certificate to Default Backend (or generates a new self signed one)
 	var defaultPemFileName, defaultPemSHA string
+
+	// Tries to fetch the default Certificate. If it does not exist, generates a new self signed one.
 	defaultCertificate, err := ic.getPemCertificate(ic.cfg.DefaultSSLCertificate)
-	// If no default Certificate was supplied, tries to generate a new dumb one
 	if err != nil {
-		var cert *ingress.SSLCert
-		defCert, defKey := ssl.GetFakeSSLCert()
-		cert, err = ssl.AddOrUpdateCertAndKey("system-snake-oil-certificate", defCert, defKey, []byte{})
+		// This means the Default Secret does not exist, so we will create a new one.
+		fakeCertificate := "default-fake-certificate"
+		fakeCertificatePath := fmt.Sprintf("%v/%v.pem", ingress.DefaultSSLDirectory, fakeCertificate)
+
+		// Only generates a new certificate if it doesn't already exist on disk
+		_, err := os.Stat(fakeCertificatePath)
 		if err != nil {
-			glog.Fatalf("Error generating self signed certificate: %v", err)
+			glog.V(3).Infof("No Default SSL Certificate found. Generating a new one")
+			defCert, defKey := ssl.GetFakeSSLCert()
+			defaultCertificate, err = ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{})
+			if err != nil {
+				glog.Fatalf("Error generating self signed certificate: %v", err)
+			}
+			defaultPemFileName = defaultCertificate.PemFileName
+			defaultPemSHA = defaultCertificate.PemSHA
 		} else {
-			defaultPemFileName = cert.PemFileName
-			defaultPemSHA = cert.PemSHA
+			defaultPemFileName = fakeCertificatePath
+			defaultPemSHA = ssl.PemSHA1(fakeCertificatePath)
 		}
 	} else {
 		defaultPemFileName = defaultCertificate.PemFileName
 		defaultPemSHA = defaultCertificate.PemSHA
 	}
 
-	// default server
+	// initialize the default server
 	servers[defServerName] = &ingress.Server{
 		Hostname:       defServerName,
 		SSLCertificate: defaultPemFileName,
@@ -856,7 +889,7 @@ func (ic *GenericController) createServers(data []interface{}, upstreams map[string]*ingress.Backend) map[string]*ingress.Server {
 			{
 				Path:         rootLocation,
 				IsDefBackend: true,
-				Backend:      dun,
+				Backend:      ic.getDefaultUpstream().Name,
 				Proxy:        ngxProxy,
 			},
 		}}
@@ -864,8 +897,20 @@ func (ic *GenericController) createServers(data []interface{}, upstreams map[string]*ingress.Backend) map[string]*ingress.Server {
 	// initialize all the servers
 	for _, ingIf := range data {
 		ing := ingIf.(*extensions.Ingress)
+		if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
+			continue
+		}
+
 		// check if ssl passthrough is configured
 		sslpt := ic.annotations.SSLPassthrough(ing)
+		dun := ic.getDefaultUpstream().Name
+		if ing.Spec.Backend != nil {
+			// replace default backend
+			defUpstream := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String())
+			if backendUpstream, ok := upstreams[defUpstream]; ok {
+				dun = backendUpstream.Name
+			}
+		}
 
 		for _, rule := range ing.Spec.Rules {
 			host := rule.Host
@@ -892,6 +937,9 @@ func (ic *GenericController) createServers(data []interface{}, upstreams map[string]*ingress.Backend) map[string]*ingress.Server {
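The default-certificate branch above follows a cache-on-disk pattern: regenerate the self-signed pair only when the expected PEM file is missing. The same decision, restated as a standalone sketch (the path and the generator callback are hypothetical stand-ins for `ingress.DefaultSSLDirectory` and `ssl.GetFakeSSLCert`/`ssl.AddOrUpdateCertAndKey`):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// ensureFakeCert returns the path of the fallback certificate, invoking
// generate only when the PEM file is not already present on disk.
func ensureFakeCert(path string, generate func(string) error) (string, error) {
	if _, err := os.Stat(path); err == nil {
		// reuse the certificate written by a previous run
		return path, nil
	}
	if err := generate(path); err != nil {
		return "", fmt.Errorf("error generating self signed certificate: %v", err)
	}
	return path, nil
}

func main() {
	fake := os.TempDir() + "/default-fake-certificate.pem" // hypothetical location
	p, err := ensureFakeCert(fake, func(path string) error {
		// stand-in for the real self-signed certificate generation
		return ioutil.WriteFile(path, []byte("-----BEGIN CERTIFICATE-----\n"), 0644)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("using", p)
}
```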
// configure default location and SSL for _, ingIf := range data { ing := ingIf.(*extensions.Ingress) + if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { + continue + } for _, rule := range ing.Spec.Rules { host := rule.Host @@ -920,21 +968,6 @@ func (ic *GenericController) createServers(data []interface{}, upstreams map[str servers[host].SSLCertificate = cert.PemFileName servers[host].SSLPemChecksum = cert.PemSHA } - } else { - - servers[host].SSLCertificate = defaultPemFileName - servers[host].SSLPemChecksum = defaultPemSHA - } - } - - if ing.Spec.Backend != nil { - defUpstream := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String()) - if backendUpstream, ok := upstreams[defUpstream]; ok { - if host == "" || host == defServerName { - ic.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", "error: rules with Spec.Backend are allowed only with hostnames") - continue - } - servers[host].Locations[0].Backend = backendUpstream.Name } } } @@ -976,7 +1009,7 @@ func (ic *GenericController) getEndpoints( port, err := service.GetPortMapping(servicePort.StrVal, s) if err == nil { targetPort = port - continue + break } glog.Warningf("error mapping service port: %v", err) @@ -1040,6 +1073,7 @@ func (ic GenericController) Start() { go ic.ingController.Run(ic.stopCh) go ic.endpController.Run(ic.stopCh) go ic.svcController.Run(ic.stopCh) + go ic.nodeController.Run(ic.stopCh) go ic.secrController.Run(ic.stopCh) go ic.mapController.Run(ic.stopCh) diff --git a/core/pkg/ingress/controller/launch.go b/core/pkg/ingress/controller/launch.go index a1f325d4f..144da3969 100644 --- a/core/pkg/ingress/controller/launch.go +++ b/core/pkg/ingress/controller/launch.go @@ -82,8 +82,12 @@ func NewIngressController(backend ingress.Controller) *GenericController { updateStatus = flags.Bool("update-status", true, `Indicates if the ingress controller should update the Ingress status IP/hostname. 
Default is true`) + + electionID = flags.String("election-id", "ingress-controller-leader", `Election id to use for status update.`) ) + backend.OverrideFlags(flags) + flags.AddGoFlagSet(flag.CommandLine) flags.Parse(os.Args) @@ -124,21 +128,41 @@ func NewIngressController(backend ingress.Controller) *GenericController { glog.Infof("service %v validated as source of Ingress status", *publishSvc) } - if *configMap != "" { - _, _, err = k8s.ParseNameNS(*configMap) + for _, configMap := range []string{*configMap, *tcpConfigMapName, *udpConfigMapName} { + + if configMap == "" { + continue + } + + _, err = k8s.IsValidConfigMap(kubeClient, configMap) + if err != nil { - glog.Fatalf("configmap error: %v", err) + glog.Fatalf("%v", err) } } - os.MkdirAll(ingress.DefaultSSLDirectory, 0655) + if *watchNamespace != "" { + + _, err = k8s.IsValidNamespace(kubeClient, *watchNamespace) + + if err != nil { + glog.Fatalf("no watchNamespace with name %v found: %v", *watchNamespace, err) + } + } + + err = os.MkdirAll(ingress.DefaultSSLDirectory, 0655) + if err != nil { + glog.Errorf("Failed to mkdir SSL directory: %v", err) + } config := &Configuration{ UpdateStatus: *updateStatus, + ElectionID: *electionID, Client: kubeClient, ResyncPeriod: *resyncPeriod, DefaultService: *defaultSvc, IngressClass: *ingressClass, + DefaultIngressClass: backend.DefaultIngressClass(), Namespace: *watchNamespace, ConfigMapName: *configMap, TCPConfigMapName: *tcpConfigMapName, diff --git a/core/pkg/ingress/controller/named_port.go b/core/pkg/ingress/controller/named_port.go index c1cdeb6e2..abc1e6ef2 100644 --- a/core/pkg/ingress/controller/named_port.go +++ b/core/pkg/ingress/controller/named_port.go @@ -32,7 +32,7 @@ import ( ) // checkSvcForUpdate verifies if one of the running pods for a service contains -// named port. If the annotation in the service does not exists or is not equals +// named port. If the annotation in the service does not exist or is not equals // to the port mapping obtained from the pod the service must be updated to reflect // the current state func (ic *GenericController) checkSvcForUpdate(svc *api.Service) error { diff --git a/core/pkg/ingress/controller/util.go b/core/pkg/ingress/controller/util.go index 439526cba..77b88ba0c 100644 --- a/core/pkg/ingress/controller/util.go +++ b/core/pkg/ingress/controller/util.go @@ -22,10 +22,7 @@ import ( "github.com/golang/glog" "github.com/imdario/mergo" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/ingress/core/pkg/ingress" - "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) // DeniedKeyName name of the key that contains the reason to deny a location @@ -84,22 +81,6 @@ func matchHostnames(pattern, host string) bool { return true } -// IsValidClass returns true if the given Ingress either doesn't specify -// the ingress.class annotation, or it's set to the configured in the -// ingress controller. 
-func IsValidClass(ing *extensions.Ingress, class string) bool { - if class == "" { - return true - } - - cc, _ := parser.GetStringAnnotation(ingressClassKey, ing) - if cc == "" { - return true - } - - return cc == class -} - func mergeLocationAnnotations(loc *ingress.Location, anns map[string]interface{}) { if _, ok := anns[DeniedKeyName]; ok { loc.Denied = anns[DeniedKeyName].(error) diff --git a/core/pkg/ingress/controller/util_test.go b/core/pkg/ingress/controller/util_test.go index 370714533..b4da882d2 100644 --- a/core/pkg/ingress/controller/util_test.go +++ b/core/pkg/ingress/controller/util_test.go @@ -19,17 +19,16 @@ package controller import ( "testing" + "reflect" + "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/annotations/auth" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" + "k8s.io/ingress/core/pkg/ingress/annotations/authtls" "k8s.io/ingress/core/pkg/ingress/annotations/ipwhitelist" "k8s.io/ingress/core/pkg/ingress/annotations/proxy" "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" - "k8s.io/ingress/core/pkg/ingress/resolver" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "reflect" ) type fakeError struct{} @@ -38,32 +37,6 @@ func (fe *fakeError) Error() string { return "fakeError" } -func TestIsValidClass(t *testing.T) { - ing := &extensions.Ingress{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - Namespace: api.NamespaceDefault, - }, - } - - b := IsValidClass(ing, "") - if !b { - t.Error("Expected a valid class (missing annotation)") - } - - data := map[string]string{} - data[ingressClassKey] = "custom" - ing.SetAnnotations(data) - b = IsValidClass(ing, "custom") - if !b { - t.Errorf("Expected valid class but %v returned", b) - } - b = IsValidClass(ing, "nginx") - if b { - t.Errorf("Expected invalid class but %v returned", b) - } -} - func TestIsHostValid(t *testing.T) { fkCert := &ingress.SSLCert{ CAFileName: "foo", @@ -130,7 +103,7 @@ func TestMergeLocationAnnotations(t *testing.T) { "Redirect": rewrite.Redirect{}, "Whitelist": ipwhitelist.SourceRange{}, "Proxy": proxy.Configuration{}, - "CertificateAuth": resolver.AuthSSLCert{}, + "CertificateAuth": authtls.AuthSSLConfig{}, "UsePortInRedirects": true, } diff --git a/core/pkg/ingress/defaults/main.go b/core/pkg/ingress/defaults/main.go index ba56bc7c9..d8420da04 100644 --- a/core/pkg/ingress/defaults/main.go +++ b/core/pkg/ingress/defaults/main.go @@ -37,6 +37,16 @@ type Backend struct { // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) ProxyBufferSize string `json:"proxy-buffer-size"` + // Sets a text that should be changed in the path attribute of the “Set-Cookie” header fields of + // a proxied server response. + // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path + ProxyCookiePath string `json:"proxy-cookie-path"` + + // Sets a text that should be changed in the domain attribute of the “Set-Cookie” header fields + // of a proxied server response. + // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain + ProxyCookieDomain string `json:"proxy-cookie-domain"` + // Name server/s used to resolve names of upstream servers into IP addresses. // The file /etc/resolv.conf is used as DNS resolution configuration. 
	Resolver []net.IP
@@ -49,6 +59,10 @@ type Backend struct {
 	// Enables or disables the redirect (301) to the HTTPS port
 	SSLRedirect bool `json:"ssl-redirect"`
 
+	// Enables or disables the redirect (301) to the HTTPS port even without a TLS cert.
+	// This is useful when doing SSL offloading outside of the cluster, e.g. on an AWS ELB.
+	ForceSSLRedirect bool `json:"force-ssl-redirect"`
+
 	// Enables or disables the specification of port in redirects
 	// Default: false
 	UsePortInRedirects bool `json:"use-port-in-redirects"`
diff --git a/core/pkg/ingress/resolver/main.go b/core/pkg/ingress/resolver/main.go
index 1e122e236..a11b35f58 100644
--- a/core/pkg/ingress/resolver/main.go
+++ b/core/pkg/ingress/resolver/main.go
@@ -28,7 +28,7 @@ type DefaultBackend interface {
 	GetDefaultBackend() defaults.Backend
 }
 
-// Secret has a method that searchs for secrets contenating
+// Secret has a method that searches for secrets, concatenating
 // the namespace and name using the character /
 type Secret interface {
 	GetSecret(string) (*api.Secret, error)
@@ -37,8 +37,6 @@ type Secret interface {
 // AuthCertificate resolves a given secret name into an SSL certificate.
 // The secret must contain 3 keys named:
 // ca.crt: contains the certificate chain used for authentication
-// tls.crt: (ignored) contains the tls certificate chain, or any other valid base64 data
-// tls.key: (ignored) contains the tls secret key, or any other valid base64 data
 type AuthCertificate interface {
 	GetAuthCertificate(string) (*AuthSSLCert, error)
 }
@@ -48,10 +46,6 @@ type AuthCertificate interface {
 type AuthSSLCert struct {
 	// Secret contains the name of the secret this was fetched from
 	Secret string `json:"secret"`
-	// CertFileName contains the filename the secret's 'tls.crt' was saved to
-	CertFileName string `json:"certFilename"`
-	// KeyFileName contains the path the secret's 'tls.key'
-	KeyFileName string `json:"keyFilename"`
 	// CAFileName contains the path to the secrets 'ca.crt'
 	CAFileName string `json:"caFilename"`
 	// PemSHA contains the SHA1 hash of the 'tls.crt' value
diff --git a/core/pkg/ingress/sort_ingress.go b/core/pkg/ingress/sort_ingress.go
index 621b95232..cc5f2d76d 100644
--- a/core/pkg/ingress/sort_ingress.go
+++ b/core/pkg/ingress/sort_ingress.go
@@ -54,10 +54,6 @@ type ServerByName []*Server
 func (c ServerByName) Len() int      { return len(c) }
 func (c ServerByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
 func (c ServerByName) Less(i, j int) bool {
-	// special case for catch all server
-	if c[j].Hostname == "_" {
-		return false
-	}
 	return c[i].Hostname < c[j].Hostname
 }
 
diff --git a/core/pkg/ingress/status/election_test.go b/core/pkg/ingress/status/election_test.go
new file mode 100644
index 000000000..4726aa8af
--- /dev/null
+++ b/core/pkg/ingress/status/election_test.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package status + +import ( + "encoding/json" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + tc "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" +) + +func TestGetCurrentLeaderLeaderExist(t *testing.T) { + fkER := resourcelock.LeaderElectionRecord{ + HolderIdentity: "currentLeader", + LeaseDurationSeconds: 30, + AcquireTime: unversioned.Now(), + RenewTime: unversioned.Now(), + LeaderTransitions: 3, + } + leaderInfo, _ := json.Marshal(fkER) + fkEndpoints := api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "ingress-controller-test", + Namespace: api.NamespaceSystem, + Annotations: map[string]string{ + resourcelock.LeaderElectionRecordAnnotationKey: string(leaderInfo), + }, + }, + } + fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}}) + identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) + if err != nil { + t.Fatalf("expected identitiy and endpoints but returned error %s", err) + } + + if endpoints == nil { + t.Fatalf("returned nil but expected an endpoints") + } + + if identity != "currentLeader" { + t.Fatalf("returned %v but expected %v", identity, "currentLeader") + } +} + +func TestGetCurrentLeaderLeaderNotExist(t *testing.T) { + fkEndpoints := api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "ingress-controller-test", + Namespace: api.NamespaceSystem, + Annotations: map[string]string{}, + }, + } + fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}}) + identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) + if err != nil { + t.Fatalf("unexpeted error: %v", err) + } + + if endpoints == nil { + t.Fatalf("returned nil but expected an endpoints") + } + + if identity != "" { + t.Fatalf("returned %s but expected %s", identity, "") + } +} + +func TestGetCurrentLeaderAnnotationError(t *testing.T) { + fkEndpoints := api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "ingress-controller-test", + Namespace: api.NamespaceSystem, + Annotations: map[string]string{ + resourcelock.LeaderElectionRecordAnnotationKey: "just-test-error-leader-annotation", + }, + }, + } + fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{fkEndpoints}}) + _, _, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) + if err == nil { + t.Errorf("expected error") + } +} + +func TestNewElection(t *testing.T) { + fk := tc.NewSimpleClientset(&api.EndpointsList{Items: []api.Endpoints{ + { + ObjectMeta: api.ObjectMeta{ + Name: "ingress-controller-test", + Namespace: api.NamespaceSystem, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "ingress-controller-test-020", + Namespace: api.NamespaceSystem, + }, + }, + }}) + + ne, err := NewElection("ingress-controller-test", "startLeader", api.NamespaceSystem, 4*time.Second, func(leader string) { + // do nothing + go t.Logf("execute callback fun, leader is: %s", leader) + }, fk) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if ne == nil { + t.Fatalf("unexpected nil") + } +} diff --git a/core/pkg/ingress/status/status.go b/core/pkg/ingress/status/status.go index 9d8bc6b5b..6e54d422b 100644 --- a/core/pkg/ingress/status/status.go +++ b/core/pkg/ingress/status/status.go @@ -32,8 +32,9 @@ import ( "k8s.io/kubernetes/pkg/util/wait" cache_store "k8s.io/ingress/core/pkg/cache" + "k8s.io/ingress/core/pkg/ingress/annotations/class" 
"k8s.io/ingress/core/pkg/k8s" - strings "k8s.io/ingress/core/pkg/strings" + "k8s.io/ingress/core/pkg/strings" "k8s.io/ingress/core/pkg/task" ) @@ -52,6 +53,10 @@ type Config struct { Client clientset.Interface PublishService string IngressLister cache_store.StoreToIngressLister + ElectionID string + + DefaultIngressClass string + IngressClass string } // statusSync keeps the status IP in each Ingress rule updated executing a periodic check @@ -171,7 +176,7 @@ func NewStatusSyncer(config Config) Sync { } st.syncQueue = task.NewCustomTaskQueue(st.sync, st.keyfunc) - le, err := NewElection("ingress-controller-leader", + le, err := NewElection(config.ElectionID, pod.Name, pod.Namespace, 30*time.Second, st.callback, config.Client) if err != nil { @@ -242,7 +247,12 @@ func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) { wg.Add(len(ings)) for _, cur := range ings { ing := cur.(*extensions.Ingress) - go func(wg *sync.WaitGroup) { + + if !class.IsValid(ing, s.Config.IngressClass, s.Config.DefaultIngressClass) { + continue + } + + go func(wg *sync.WaitGroup, ing *extensions.Ingress) { defer wg.Done() ingClient := s.Client.Extensions().Ingresses(ing.Namespace) currIng, err := ingClient.Get(ing.Name) @@ -251,7 +261,7 @@ func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) { return } - curIPs := ing.Status.LoadBalancer.Ingress + curIPs := currIng.Status.LoadBalancer.Ingress sort.Sort(loadBalancerIngressByIP(curIPs)) if ingressSliceEqual(newIPs, curIPs) { glog.V(3).Infof("skipping update of Ingress %v/%v (there is no change)", currIng.Namespace, currIng.Name) @@ -264,7 +274,7 @@ func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) { if err != nil { glog.Warningf("error updating ingress rule: %v", err) } - }(&wg) + }(&wg, ing) } wg.Wait() diff --git a/core/pkg/ingress/status/status_test.go b/core/pkg/ingress/status/status_test.go new file mode 100644 index 000000000..57e9f1e84 --- /dev/null +++ b/core/pkg/ingress/status/status_test.go @@ -0,0 +1,487 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package status + +import ( + "os" + "sort" + "sync" + "testing" + "time" + + cache_store "k8s.io/ingress/core/pkg/cache" + "k8s.io/ingress/core/pkg/k8s" + "k8s.io/ingress/core/pkg/task" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/cache" + testclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/util/sets" +) + +func buildLoadBalancerIngressByIP() loadBalancerIngressByIP { + return []api.LoadBalancerIngress{ + { + IP: "10.0.0.1", + Hostname: "foo1", + }, + { + IP: "10.0.0.2", + Hostname: "foo2", + }, + { + IP: "10.0.0.3", + Hostname: "", + }, + { + IP: "", + Hostname: "foo4", + }, + } +} + +func buildSimpleClientSet() *testclient.Clientset { + return testclient.NewSimpleClientset( + &api.PodList{Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo1", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "lable_sig": "foo_pod", + }, + }, + Spec: api.PodSpec{ + NodeName: "foo_node_2", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "foo2", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "lable_sig": "foo_no", + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "foo3", + Namespace: api.NamespaceSystem, + Labels: map[string]string{ + "lable_sig": "foo_pod", + }, + }, + Spec: api.PodSpec{ + NodeName: "foo_node_2", + }, + }, + }}, + &api.ServiceList{Items: []api.Service{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Status: api.ServiceStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: buildLoadBalancerIngressByIP(), + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "foo_non_exist", + Namespace: api.NamespaceDefault, + }, + }, + }}, + &api.NodeList{Items: []api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo_node_1", + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + { + Type: api.NodeLegacyHostIP, + Address: "10.0.0.1", + }, { + Type: api.NodeExternalIP, + Address: "10.0.0.2", + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "foo_node_2", + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + { + Type: api.NodeLegacyHostIP, + Address: "11.0.0.1", + }, + { + Type: api.NodeExternalIP, + Address: "11.0.0.2", + }, + }, + }, + }, + }}, + &api.EndpointsList{Items: []api.Endpoints{ + { + ObjectMeta: api.ObjectMeta{ + Name: "ingress-controller-leader", + Namespace: api.NamespaceDefault, + }, + }}}, + &extensions.IngressList{Items: buildExtensionsIngresses()}, + ) +} + +func fakeSynFn(interface{}) error { + return nil +} + +func buildExtensionsIngresses() []extensions.Ingress { + return []extensions.Ingress{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo_ingress_1", + Namespace: api.NamespaceDefault, + }, + Status: extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "10.0.0.1", + Hostname: "foo1", + }, + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "foo_ingress_2", + Namespace: api.NamespaceDefault, + }, + Status: extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{}, + }, + }, + }, + } +} + +func buildIngressLIstener() cache_store.StoreToIngressLister { + store := cache.NewStore(cache.MetaNamespaceKeyFunc) + ids := sets.NewString("foo_ingress_non_01") + for id := range ids { + store.Add(&extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: id, + Namespace: api.NamespaceDefault, + }}) + } + 
store.Add(&extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo_ingress_1", + Namespace: api.NamespaceDefault, + }, + Status: extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: buildLoadBalancerIngressByIP(), + }, + }, + }) + return cache_store.StoreToIngressLister{Store: store} +} + +func buildStatusSync() statusSync { + return statusSync{ + pod: &k8s.PodInfo{ + Name: "foo_base_pod", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "lable_sig": "foo_pod", + }, + }, + runLock: &sync.Mutex{}, + syncQueue: task.NewTaskQueue(fakeSynFn), + Config: Config{ + Client: buildSimpleClientSet(), + PublishService: api.NamespaceDefault + "/" + "foo", + IngressLister: buildIngressLIstener(), + }, + } +} + +func TestStatusActions(t *testing.T) { + // make sure election can be created + os.Setenv("POD_NAME", "foo1") + os.Setenv("POD_NAMESPACE", api.NamespaceDefault) + c := Config{ + Client: buildSimpleClientSet(), + PublishService: "", + IngressLister: buildIngressLIstener(), + } + // create object + fkSync := NewStatusSyncer(c) + if fkSync == nil { + t.Fatalf("expected a valid Sync") + } + + fk := fkSync.(statusSync) + + ns := make(chan struct{}) + // start it and wait for the election and syn actions + go fk.Run(ns) + // wait for the election + time.Sleep(100 * time.Millisecond) + // execute sync + fk.sync("just-test") + // PublishService is empty, so the running address is: ["11.0.0.2"] + // after updated, the ingress's ip should only be "11.0.0.2" + newIPs := []api.LoadBalancerIngress{{ + IP: "11.0.0.2", + }} + fooIngress1, err1 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1") + if err1 != nil { + t.Fatalf("unexpected error") + } + fooIngress1CurIPs := fooIngress1.Status.LoadBalancer.Ingress + if !ingressSliceEqual(fooIngress1CurIPs, newIPs) { + t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs) + } + + // execute shutdown + fk.Shutdown() + // ingress should be empty + newIPs2 := []api.LoadBalancerIngress{} + fooIngress2, err2 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1") + if err2 != nil { + t.Fatalf("unexpected error") + } + fooIngress2CurIPs := fooIngress2.Status.LoadBalancer.Ingress + if !ingressSliceEqual(fooIngress2CurIPs, newIPs2) { + t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, newIPs2) + } + + // end test + ns <- struct{}{} +} + +func TestCallback(t *testing.T) { + fk := buildStatusSync() + // do nothing + fk.callback("foo_base_pod") +} + +func TestKeyfunc(t *testing.T) { + fk := buildStatusSync() + i := "foo_base_pod" + r, err := fk.keyfunc(i) + + if err != nil { + t.Fatalf("unexpected error") + } + if r != i { + t.Errorf("returned %v but expected %v", r, i) + } +} + +func TestRunningAddresessWithPublishService(t *testing.T) { + fk := buildStatusSync() + + r, _ := fk.runningAddresess() + if r == nil { + t.Fatalf("returned nil but expected valid []string") + } + rl := len(r) + if len(r) != 4 { + t.Errorf("returned %v but expected %v", rl, 4) + } +} + +func TestRunningAddresessWithPods(t *testing.T) { + fk := buildStatusSync() + fk.PublishService = "" + + r, _ := fk.runningAddresess() + if r == nil { + t.Fatalf("returned nil but expected valid []string") + } + rl := len(r) + if len(r) != 1 { + t.Fatalf("returned %v but expected %v", rl, 1) + } + rv := r[0] + if rv != "11.0.0.2" { + t.Errorf("returned %v but expected %v", rv, "11.0.0.2") + } +} + +func TestUpdateStatus(t *testing.T) { + fk := buildStatusSync() + newIPs := 
buildLoadBalancerIngressByIP() + sort.Sort(loadBalancerIngressByIP(newIPs)) + fk.updateStatus(newIPs) + + fooIngress1, err1 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_1") + if err1 != nil { + t.Fatalf("unexpected error") + } + fooIngress1CurIPs := fooIngress1.Status.LoadBalancer.Ingress + if !ingressSliceEqual(fooIngress1CurIPs, newIPs) { + t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs) + } + + fooIngress2, err2 := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_2") + if err2 != nil { + t.Fatalf("unexpected error") + } + fooIngress2CurIPs := fooIngress2.Status.LoadBalancer.Ingress + if !ingressSliceEqual(fooIngress2CurIPs, []api.LoadBalancerIngress{}) { + t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, []api.LoadBalancerIngress{}) + } +} + +func TestSliceToStatus(t *testing.T) { + fkEndpoints := []string{ + "10.0.0.1", + "2001:db8::68", + "opensource-k8s-ingress", + } + + r := sliceToStatus(fkEndpoints) + + if r == nil { + t.Fatalf("returned nil but expected a valid []api.LoadBalancerIngress") + } + rl := len(r) + if rl != 3 { + t.Fatalf("returned %v but expected %v", rl, 3) + } + re1 := r[0] + if re1.Hostname != "opensource-k8s-ingress" { + t.Fatalf("returned %v but expected %v", re1, api.LoadBalancerIngress{Hostname: "opensource-k8s-ingress"}) + } + re2 := r[1] + if re2.IP != "10.0.0.1" { + t.Fatalf("returned %v but expected %v", re2, api.LoadBalancerIngress{IP: "10.0.0.1"}) + } + re3 := r[2] + if re3.IP != "2001:db8::68" { + t.Fatalf("returned %v but expected %v", re3, api.LoadBalancerIngress{IP: "2001:db8::68"}) + } +} + +func TestIngressSliceEqual(t *testing.T) { + fk1 := buildLoadBalancerIngressByIP() + fk2 := append(buildLoadBalancerIngressByIP(), api.LoadBalancerIngress{ + IP: "10.0.0.5", + Hostname: "foo5", + }) + fk3 := buildLoadBalancerIngressByIP() + fk3[0].Hostname = "foo_no_01" + fk4 := buildLoadBalancerIngressByIP() + fk4[2].IP = "11.0.0.3" + + fooTests := []struct { + lhs []api.LoadBalancerIngress + rhs []api.LoadBalancerIngress + er bool + }{ + {fk1, fk1, true}, + {fk2, fk1, false}, + {fk3, fk1, false}, + {fk4, fk1, false}, + {fk1, nil, false}, + {nil, nil, true}, + {[]api.LoadBalancerIngress{}, []api.LoadBalancerIngress{}, true}, + } + + for _, fooTest := range fooTests { + r := ingressSliceEqual(fooTest.lhs, fooTest.rhs) + if r != fooTest.er { + t.Errorf("returned %v but expected %v", r, fooTest.er) + } + } +} + +func TestLoadBalancerIngressByIPLen(t *testing.T) { + fooTests := []struct { + ips loadBalancerIngressByIP + el int + }{ + {[]api.LoadBalancerIngress{}, 0}, + {buildLoadBalancerIngressByIP(), 4}, + {nil, 0}, + } + + for _, fooTest := range fooTests { + r := fooTest.ips.Len() + if r != fooTest.el { + t.Errorf("returned %v but expected %v ", r, fooTest.el) + } + } +} + +func TestLoadBalancerIngressByIPSwap(t *testing.T) { + fooTests := []struct { + ips loadBalancerIngressByIP + i int + j int + }{ + {buildLoadBalancerIngressByIP(), 0, 1}, + {buildLoadBalancerIngressByIP(), 2, 1}, + } + + for _, fooTest := range fooTests { + fooi := fooTest.ips[fooTest.i] + fooj := fooTest.ips[fooTest.j] + fooTest.ips.Swap(fooTest.i, fooTest.j) + if fooi.IP != fooTest.ips[fooTest.j].IP || + fooj.IP != fooTest.ips[fooTest.i].IP { + t.Errorf("failed to swap for loadBalancerIngressByIP") + } + } +} + +func TestLoadBalancerIngressByIPLess(t *testing.T) { + fooTests := []struct { + ips loadBalancerIngressByIP + i int + j int + er bool + }{ + {buildLoadBalancerIngressByIP(), 0, 1, true}, + 
{buildLoadBalancerIngressByIP(), 2, 1, false}, + } + + for _, fooTest := range fooTests { + r := fooTest.ips.Less(fooTest.i, fooTest.j) + if r != fooTest.er { + t.Errorf("returned %v but expected %v ", r, fooTest.er) + } + } +} diff --git a/core/pkg/ingress/types.go b/core/pkg/ingress/types.go index 4891995e7..8121abe4d 100644 --- a/core/pkg/ingress/types.go +++ b/core/pkg/ingress/types.go @@ -17,17 +17,22 @@ limitations under the License. package ingress import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/healthz" + "github.com/spf13/pflag" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/healthz" + "k8s.io/kubernetes/pkg/util/intstr" + + cache_store "k8s.io/ingress/core/pkg/cache" "k8s.io/ingress/core/pkg/ingress/annotations/auth" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" + "k8s.io/ingress/core/pkg/ingress/annotations/authtls" "k8s.io/ingress/core/pkg/ingress/annotations/ipwhitelist" "k8s.io/ingress/core/pkg/ingress/annotations/proxy" "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" "k8s.io/ingress/core/pkg/ingress/defaults" - "k8s.io/ingress/core/pkg/ingress/resolver" ) var ( @@ -81,11 +86,29 @@ type Controller interface { OnUpdate(Configuration) ([]byte, error) // ConfigMap content of --configmap SetConfig(*api.ConfigMap) + // SetListers allows the access of store listers present in the generic controller + // This avoid the use of the kubernetes client. + SetListers(StoreLister) // BackendDefaults returns the minimum settings required to configure the // communication to endpoints BackendDefaults() defaults.Backend // Info returns information about the ingress controller Info() *BackendInfo + // OverrideFlags allow the customization of the flags in the backend + OverrideFlags(*pflag.FlagSet) + // DefaultIngressClass just return the default ingress class + DefaultIngressClass() string +} + +// StoreLister returns the configured stores for ingresses, services, +// endpoints, secrets and configmaps. +type StoreLister struct { + Ingress cache_store.StoreToIngressLister + Service cache.StoreToServiceLister + Node cache.StoreToNodeLister + Endpoint cache.StoreToEndpointsLister + Secret cache_store.StoreToSecretsLister + ConfigMap cache_store.StoreToConfigmapLister } // BackendInfo returns information about the backend. @@ -112,10 +135,10 @@ type Configuration struct { Servers []*Server `json:"servers"` // TCPEndpoints contain endpoints for tcp streams handled by this backend // +optional - TCPEndpoints []*Location `json:"tcpEndpoints,omitempty"` - // UPDEndpoints contain endpoints for udp streams handled by this backend + TCPEndpoints []L4Service `json:"tcpEndpoints,omitempty"` + // UDPEndpoints contain endpoints for udp streams handled by this backend // +optional - UPDEndpoints []*Location `json:"udpEndpoints,omitempty"` + UDPEndpoints []L4Service `json:"udpEndpoints,omitempty"` // PassthroughBackend contains the backends used for SSL passthrough. // It contains information about the associated Server Name Indication (SNI). 
// +optional @@ -134,9 +157,29 @@ type Backend struct { Secure bool `json:"secure"` // Endpoints contains the list of endpoints currently running Endpoints []Endpoint `json:"endpoints"` + // StickySession contains the StickyConfig object with stickness configuration + + SessionAffinity SessionAffinityConfig } -// Endpoint describes a kubernetes endpoint in an backend +// SessionAffinityConfig describes different affinity configurations for new sessions. +// Once a session is mapped to a backend based on some affinity setting, it +// retains that mapping till the backend goes down, or the ingress controller +// restarts. Exactly one of these values will be set on the upstream, since multiple +// affinity values are incompatible. Once set, the backend makes no guarantees +// about honoring updates. +type SessionAffinityConfig struct { + AffinityType string `json:"name"` + CookieSessionAffinity CookieSessionAffinity +} + +// CookieSessionAffinity defines the structure used in Affinity configured by Cookies. +type CookieSessionAffinity struct { + Name string `json:"name"` + Hash string `json:"hash"` +} + +// Endpoint describes a kubernetes endpoint in a backend type Endpoint struct { // Address IP address of the endpoint Address string `json:"address"` @@ -233,10 +276,13 @@ type Location struct { // CertificateAuth indicates the access to this location requires // external authentication // +optional - CertificateAuth resolver.AuthSSLCert `json:"certificateAuth,omitempty"` + CertificateAuth authtls.AuthSSLConfig `json:"certificateAuth,omitempty"` // UsePortInRedirects indicates if redirects must specify the port // +optional UsePortInRedirects bool `json:"use-port-in-redirects"` + // ConfigurationSnippet contains additional configuration for the backend + // to be considered in the configuration of the location + ConfigurationSnippet string `json:"configuration-snippet"` } // SSLPassthroughBackend describes a SSL upstream server configured @@ -249,3 +295,21 @@ type SSLPassthroughBackend struct { // Hostname returns the FQDN of the server Hostname string `json:"hostname"` } + +// L4Service describes a L4 Ingress service. 
+type L4Service struct {
+	// Port external port to expose
+	Port int `json:"port"`
+	// Backend of the service
+	Backend L4Backend `json:"backend"`
+	// Endpoints active endpoints of the service
+	Endpoints []Endpoint `json:"endpoints"`
+}
+
+// L4Backend describes the kubernetes service behind the L4 Ingress service
+type L4Backend struct {
+	Port      intstr.IntOrString `json:"port"`
+	Name      string             `json:"name"`
+	Namespace string             `json:"namespace"`
+	Protocol  api.Protocol       `json:"protocol"`
+}
diff --git a/core/pkg/k8s/main.go b/core/pkg/k8s/main.go
index 2f0caf5e5..af0b5df73 100644
--- a/core/pkg/k8s/main.go
+++ b/core/pkg/k8s/main.go
@@ -34,6 +34,30 @@ func IsValidService(kubeClient clientset.Interface, name string) (*api.Service,
 	return kubeClient.Core().Services(ns).Get(name)
 }
 
+// IsValidConfigMap checks whether a ConfigMap with the specified name exists
+func IsValidConfigMap(kubeClient clientset.Interface, fullName string) (*api.ConfigMap, error) {
+
+	ns, name, err := ParseNameNS(fullName)
+
+	if err != nil {
+		return nil, err
+	}
+
+	configMap, err := kubeClient.Core().ConfigMaps(ns).Get(name)
+
+	if err != nil {
+		return nil, fmt.Errorf("configmap not found: %v", err)
+	}
+
+	return configMap, nil
+
+}
+
+// IsValidNamespace checks whether a namespace with the specified name exists
+func IsValidNamespace(kubeClient clientset.Interface, name string) (*api.Namespace, error) {
+	return kubeClient.Core().Namespaces().Get(name)
+}
+
 // IsValidSecret checks whether a secret with the specified name exists
 func IsValidSecret(kubeClient clientset.Interface, name string) (*api.Secret, error) {
 	ns, name, err := ParseNameNS(name)
diff --git a/core/pkg/k8s/main_test.go b/core/pkg/k8s/main_test.go
index 384a7313d..58f640dec 100644
--- a/core/pkg/k8s/main_test.go
+++ b/core/pkg/k8s/main_test.go
@@ -85,6 +85,64 @@ func TestIsValidService(t *testing.T) {
 	}
 }
 
+func TestIsValidNamespace(t *testing.T) {
+
+	fk := testclient.NewSimpleClientset(&api.Namespace{
+		ObjectMeta: api.ObjectMeta{
+			Name: "default",
+		},
+	})
+
+	_, err := IsValidNamespace(fk, "empty")
+	if err == nil {
+		t.Errorf("expected an error but returned nil")
+	}
+
+	ns, err := IsValidNamespace(fk, "default")
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	if ns == nil {
+		t.Errorf("expected a namespace but returned nil")
+	}
+
+}
+
+func TestIsValidConfigMap(t *testing.T) {
+
+	fk := testclient.NewSimpleClientset(&api.ConfigMap{
+		ObjectMeta: api.ObjectMeta{
+			Namespace: api.NamespaceDefault,
+			Name:      "demo",
+		},
+	})
+
+	_, err := IsValidConfigMap(fk, "")
+	if err == nil {
+		t.Errorf("expected an error but returned nil")
+	}
+
+	s, err := IsValidConfigMap(fk, "default/demo")
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	if s == nil {
+		t.Errorf("expected a configmap but returned nil")
+	}
+
+	fk = testclient.NewSimpleClientset()
+	s, err = IsValidConfigMap(fk, "default/demo")
+	if err == nil {
+		t.Errorf("expected an error but returned nil")
+	}
+	if s != nil {
+		t.Errorf("unexpected Configmap returned: %v", s)
+	}
+
+}
+
 func TestIsValidSecret(t *testing.T) {
 	fk := testclient.NewSimpleClientset(&api.Secret{
 		ObjectMeta: api.ObjectMeta{
diff --git a/core/pkg/net/ssl/ssl.go b/core/pkg/net/ssl/ssl.go
index 1ac6d2fa2..c758423fc 100644
--- a/core/pkg/net/ssl/ssl.go
+++ b/core/pkg/net/ssl/ssl.go
@@ -17,14 +17,19 @@ limitations under the License.
package ssl import ( + "crypto/rand" + "crypto/rsa" "crypto/sha1" "crypto/x509" + "crypto/x509/pkix" "encoding/hex" "encoding/pem" "errors" "fmt" "io/ioutil" + "math/big" "os" + "time" "github.com/golang/glog" @@ -37,9 +42,11 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, pemFileName := fmt.Sprintf("%v/%v", ingress.DefaultSSLDirectory, pemName) tempPemFile, err := ioutil.TempFile(ingress.DefaultSSLDirectory, pemName) + if err != nil { - return nil, fmt.Errorf("could not create temp pem file %v: %v", tempPemFile.Name(), err) + return nil, fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) } + glog.V(3).Infof("Creating temp file %v for Keypair: %v", tempPemFile.Name(), pemName) _, err = tempPemFile.Write(cert) if err != nil { @@ -61,16 +68,25 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, pemCerts, err := ioutil.ReadFile(tempPemFile.Name()) if err != nil { + _ = os.Remove(tempPemFile.Name()) return nil, err } - pembBock, _ := pem.Decode(pemCerts) - if pembBock == nil { + pemBlock, _ := pem.Decode(pemCerts) + if pemBlock == nil { + _ = os.Remove(tempPemFile.Name()) return nil, fmt.Errorf("No valid PEM formatted block found") } - pemCert, err := x509.ParseCertificate(pembBock.Bytes) + // If the file does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used. + if pemBlock.Type != "CERTIFICATE" { + _ = os.Remove(tempPemFile.Name()) + return nil, fmt.Errorf("Certificate %v contains invalid data, and must be created with 'kubectl create secret tls'", name) + } + + pemCert, err := x509.ParseCertificate(pemBlock.Bytes) if err != nil { + _ = os.Remove(tempPemFile.Name()) return nil, err } @@ -97,58 +113,119 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, return nil, errors.New(oe) } - caName := fmt.Sprintf("ca-%v.pem", name) - caFileName := fmt.Sprintf("%v/%v", ingress.DefaultSSLDirectory, caName) - f, err := os.Create(caFileName) + caFile, err := os.OpenFile(pemFileName, os.O_RDWR|os.O_APPEND, 0600) if err != nil { - return nil, fmt.Errorf("could not create ca pem file %v: %v", caFileName, err) + return nil, fmt.Errorf("Could not open file %v for writing additional CA chains: %v", pemFileName, err) } - defer f.Close() - _, err = f.Write(ca) + + defer caFile.Close() + _, err = caFile.Write([]byte("\n")) if err != nil { - return nil, fmt.Errorf("could not create ca pem file %v: %v", caFileName, err) + return nil, fmt.Errorf("could not append CA to cert file %v: %v", pemFileName, err) } - f.Write([]byte("\n")) + caFile.Write(ca) + caFile.Write([]byte("\n")) return &ingress.SSLCert{ - CAFileName: caFileName, + CAFileName: pemFileName, PemFileName: pemFileName, - PemSHA: pemSHA1(pemFileName), + PemSHA: PemSHA1(pemFileName), CN: cn, }, nil } return &ingress.SSLCert{ PemFileName: pemFileName, - PemSHA: pemSHA1(pemFileName), + PemSHA: PemSHA1(pemFileName), CN: cn, }, nil } -// SearchDHParamFile iterates all the secrets mounted inside the /etc/nginx-ssl directory -// in order to find a file with the name dhparam.pem. If such file exists it will -// returns the path. If not it just returns an empty string -func SearchDHParamFile(baseDir string) string { - files, _ := ioutil.ReadDir(baseDir) - for _, file := range files { - if !file.IsDir() { - continue - } +// AddCertAuth creates a .pem file with the specified CAs to be used in Cert Authentication +// If it's already exists, it's clobbered. 
+func AddCertAuth(name string, ca []byte) (*ingress.SSLCert, error) { - dhPath := fmt.Sprintf("%v/%v/dhparam.pem", baseDir, file.Name()) - if _, err := os.Stat(dhPath); err == nil { - glog.Infof("using file '%v' for parameter ssl_dhparam", dhPath) - return dhPath - } + caName := fmt.Sprintf("ca-%v.pem", name) + caFileName := fmt.Sprintf("%v/%v", ingress.DefaultSSLDirectory, caName) + + pemCABlock, _ := pem.Decode(ca) + if pemCABlock == nil { + return nil, fmt.Errorf("No valid PEM formatted block found") + } + // If the first certificate does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used. + if pemCABlock.Type != "CERTIFICATE" { + return nil, fmt.Errorf("CA File %v contains invalid data, and must be created only with PEM formated certificates", name) } - glog.Warning("no file dhparam.pem found in secrets") - return "" + _, err := x509.ParseCertificate(pemCABlock.Bytes) + if err != nil { + return nil, err + } + + err = ioutil.WriteFile(caFileName, ca, 0644) + if err != nil { + return nil, fmt.Errorf("could not write CA file %v: %v", caFileName, err) + } + + glog.V(3).Infof("Created CA Certificate for authentication: %v", caFileName) + return &ingress.SSLCert{ + CAFileName: caFileName, + PemFileName: caFileName, + PemSHA: PemSHA1(caFileName), + }, nil } -// pemSHA1 returns the SHA1 of a pem file. This is used to +// AddOrUpdateDHParam creates a dh parameters file with the specified name +func AddOrUpdateDHParam(name string, dh []byte) (string, error) { + pemName := fmt.Sprintf("%v.pem", name) + pemFileName := fmt.Sprintf("%v/%v", ingress.DefaultSSLDirectory, pemName) + + tempPemFile, err := ioutil.TempFile(ingress.DefaultSSLDirectory, pemName) + + glog.V(3).Infof("Creating temp file %v for DH param: %v", tempPemFile.Name(), pemName) + if err != nil { + return "", fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) + } + + _, err = tempPemFile.Write(dh) + if err != nil { + return "", fmt.Errorf("could not write to pem file %v: %v", tempPemFile.Name(), err) + } + + err = tempPemFile.Close() + if err != nil { + return "", fmt.Errorf("could not close temp pem file %v: %v", tempPemFile.Name(), err) + } + + pemCerts, err := ioutil.ReadFile(tempPemFile.Name()) + if err != nil { + _ = os.Remove(tempPemFile.Name()) + return "", err + } + + pemBlock, _ := pem.Decode(pemCerts) + if pemBlock == nil { + _ = os.Remove(tempPemFile.Name()) + return "", fmt.Errorf("No valid PEM formatted block found") + } + + // If the file does not start with 'BEGIN DH PARAMETERS' it's invalid and must not be used. + if pemBlock.Type != "DH PARAMETERS" { + _ = os.Remove(tempPemFile.Name()) + return "", fmt.Errorf("Certificate %v contains invalid data", name) + } + + err = os.Rename(tempPemFile.Name(), pemFileName) + if err != nil { + return "", fmt.Errorf("could not move temp pem file %v to destination %v: %v", tempPemFile.Name(), pemFileName, err) + } + + return pemFileName, nil +} + +// PemSHA1 returns the SHA1 of a pem file. This is used to // reload NGINX in case a secret with a SSL certificate changed. 
-func pemSHA1(filename string) string {
+func PemSHA1(filename string) string {
 	hasher := sha1.New()
 	s, err := ioutil.ReadFile(filename)
 	if err != nil {
@@ -159,23 +236,52 @@ func pemSHA1(filename string) string {
 	return hex.EncodeToString(hasher.Sum(nil))
 }
 
-const (
-	snakeOilPem = "/etc/ssl/certs/ssl-cert-snakeoil.pem"
-	snakeOilKey = "/etc/ssl/private/ssl-cert-snakeoil.key"
-)
-
-// GetFakeSSLCert returns the snake oil ssl certificate created by the command
-// make-ssl-cert generate-default-snakeoil --force-overwrite
+// GetFakeSSLCert creates a self-signed certificate,
+// based on the code at https://golang.org/src/crypto/tls/generate_cert.go
 func GetFakeSSLCert() ([]byte, []byte) {
-	cert, err := ioutil.ReadFile(snakeOilPem)
+
+	var priv interface{}
+	var err error
+
+	priv, err = rsa.GenerateKey(rand.Reader, 2048)
+
 	if err != nil {
-		return nil, nil
+		glog.Fatalf("failed to generate fake private key: %s", err)
 	}
 
-	key, err := ioutil.ReadFile(snakeOilKey)
+	notBefore := time.Now()
+	// This certificate is valid for 365 days
+	notAfter := notBefore.Add(365 * 24 * time.Hour)
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+
 	if err != nil {
-		return nil, nil
+		glog.Fatalf("failed to generate fake serial number: %s", err)
 	}
 
+	template := x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject: pkix.Name{
+			Organization: []string{"Acme Co"},
+			CommonName:   "Kubernetes Ingress Controller Fake Certificate",
+		},
+		NotBefore: notBefore,
+		NotAfter:  notAfter,
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+		DNSNames:              []string{"ingress.local"},
+	}
+	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv)
+	if err != nil {
+		glog.Fatalf("Failed to create fake certificate: %s", err)
+	}
+
+	cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+
+	key := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv.(*rsa.PrivateKey))})
+
 	return cert, key
 }
diff --git a/docs/README.md b/docs/README.md
index cb8241d07..721336b0b 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,3 +1,21 @@
 # Ingress documentation and examples
 
 This directory contains documentation.
+
+## File naming convention
+
+Try to create a README file in every directory containing documentation, and index
+out from there; that's what readers will notice first. Use lower case for other
+file names unless you have a reason to draw someone's attention to it.
+Avoid CamelCase.
+
+Rationale:
+
+* Files that are common to all controllers, or heavily index other files, are
+named using ALL CAPS. This is done to indicate to the user that they should
+visit these files first. Examples include PREREQUISITES and README.
+
+* Files specific to a controller, or files that contain information about
+various controllers, are named using all lower case. Examples include
+configuration and catalog files.
+
diff --git a/docs/admin.md b/docs/admin.md
index 11ea09f24..4a7ea2b4d 100644
--- a/docs/admin.md
+++ b/docs/admin.md
@@ -1 +1,55 @@
 # Ingress admin guide
+
+This is a guide to the different deployment styles of an Ingress controller.
+
+## Vanilla deployments
+
+__GCP__: On GCE/GKE, the Ingress controller runs on the
+master.
If you wish to stop this controller and run another instance on your
+nodes instead, you can do so by following this [example](/examples/deployment/gce).
+
+__OSS__: You can deploy an OSS Ingress controller by simply
+running it as a pod in your cluster, as shown in the [examples](/examples/deployment).
+Please note that you must specify the `ingress.class`
+[annotation](/examples/PREREQUISITES.md#ingress-class) if you're running on a
+cloudprovider, or the cloudprovider controller will fight the OSS controller
+for the Ingress.
+
+__AWS__: Until we have an AWS ALB Ingress controller, you can deploy the nginx
+Ingress controller behind an ELB on AWS, as shown in the [next section](#stacked-deployments).
+
+## Stacked deployments
+
+__Behind a LoadBalancer Service__: You can deploy an OSS controller behind a
+Service of `Type=LoadBalancer`, by following this [example](/examples/static-ip/nginx#acquiring-an-ip).
+More specifically, first create a LoadBalancer Service that selects the OSS
+controller pods, then start the OSS controller with the `--publish-service`
+flag.
+
+
+__Behind another Ingress__: Sometimes it is desirable to deploy a stack of
+Ingresses, like the GCE Ingress -> nginx Ingress -> application. You might
+want to do this because the GCE HTTP lb offers some features that the GCE
+network LB does not, like a global static IP or CDN, but doesn't offer all the
+features of nginx, like URL rewriting or redirects.
+
+TODO: Write an example
+
+## Daemonset
+
+Neither a single pod nor a bank of OSS controllers scales with the cluster size.
+If you create a daemonset of OSS Ingress controllers, every new node
+automatically gets an instance of the controller listening on the specified
+ports.
+
+TODO: Write an example
+
+## Intra-cluster Ingress
+
+Since OSS Ingress controllers run in pods, you can deploy them as intra-cluster
+proxies by just not exposing them on a `hostPort` and putting them behind a
+Service of `Type=ClusterIP`.
+
+TODO: Write an example
+
+
diff --git a/docs/catalog.md b/docs/catalog.md
index 0b34df51b..2612790c7 100644
--- a/docs/catalog.md
+++ b/docs/catalog.md
@@ -2,4 +2,5 @@
 
 This is a non-comprehensive list of existing ingress controllers.
 
-
+* [Dummy controller backend](/examples/custom-controller)
+* [HAProxy Ingress controller](https://github.com/jcmoraisjr/haproxy-ingress)
diff --git a/docs/dev/devel.md b/docs/dev/devel.md
index d472638e7..e3d7c94c2 100644
--- a/docs/dev/devel.md
+++ b/docs/dev/devel.md
@@ -1,3 +1,4 @@
 # Writing Ingress controllers
 
 This doc outlines the basic steps needed to write an Ingress controller.
+If you want the tl;dr version, skip straight to the [example](/examples/custom-controller).
diff --git a/docs/dev/releases.md b/docs/dev/releases.md
index 7cb3e0f1d..ce613cc23 100644
--- a/docs/dev/releases.md
+++ b/docs/dev/releases.md
@@ -4,7 +4,7 @@ This doc explains how to build, test and release ingress controllers.
 
 ## Building
 
-All ingress controllers are build through a Makefile. Depending on your
+All ingress controllers are built through a Makefile. Depending on your
 requirements you can build a raw server binary, a local container image,
 or push an image to a remote repository.
 
@@ -76,7 +76,9 @@ $ cd $GOPATH/src/k8s.io/kubernetes
 $ ./hack/ginkgo-e2e.sh --ginkgo.focus=Ingress.* --delete-namespace-on-failure=false
 ```
 
-TODO: add instructions on running integration tests, or e2e against
+See also [related FAQs](../faq#how-are-the-ingress-controllers-tested).
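Stepping back to the `--publish-service` flow described in the admin guide above: the idea is simple enough to sketch in a few lines of Go. The snippet below is an illustrative sketch only — the Service name `ingress-lb` and the helper `publishedAddresses` are invented for the example, not part of this patch — showing how a controller can read the addresses a cloudprovider recorded on the LoadBalancer Service it was told to publish. It uses the same era of internal clientset (and its fake) that the tests in this patch use:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	testclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)

// publishedAddresses returns the addresses recorded on the publish Service's
// LoadBalancer status, preferring IPs and falling back to hostnames.
func publishedAddresses(c *testclient.Clientset, ns, name string) ([]string, error) {
	svc, err := c.Core().Services(ns).Get(name)
	if err != nil {
		return nil, err
	}
	addrs := []string{}
	for _, lbi := range svc.Status.LoadBalancer.Ingress {
		if lbi.IP != "" {
			addrs = append(addrs, lbi.IP)
			continue
		}
		if lbi.Hostname != "" {
			addrs = append(addrs, lbi.Hostname)
		}
	}
	return addrs, nil
}

func main() {
	// A fake clientset seeded with a hypothetical "ingress-lb" Service that
	// already has an IP, mimicking what a cloudprovider would fill in.
	fk := testclient.NewSimpleClientset(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "ingress-lb", Namespace: api.NamespaceDefault},
		Status: api.ServiceStatus{
			LoadBalancer: api.LoadBalancerStatus{
				Ingress: []api.LoadBalancerIngress{{IP: "10.0.0.1"}},
			},
		},
	})
	addrs, err := publishedAddresses(fk, api.NamespaceDefault, "ingress-lb")
	if err != nil {
		panic(err)
	}
	fmt.Println(addrs) // [10.0.0.1]
}
```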
+ +[TODO](https://github.com/kubernetes/ingress/issues/5): add instructions on running integration tests, or e2e against local-up/minikube. ## Releasing diff --git a/docs/dev/setup.md b/docs/dev/setup.md index 72bd667be..76e0397eb 100644 --- a/docs/dev/setup.md +++ b/docs/dev/setup.md @@ -49,7 +49,7 @@ NAME STATUS AGE VERSION a sandboxed local cluster. You will first need to [install](https://github.com/kubernetes/minikube/releases) the minikube binary, then bring up a cluster ```console -$ minikube up +$ minikube start ``` Check for Ready nodes @@ -71,12 +71,24 @@ $ minikube addons list If this list already contains the ingress controller, you don't need to redeploy it. If the addon controller is disabled, you can enable it with ```console -$ minikube enable addons ingress +$ minikube addons enable ingress ``` If the list *does not* contain the ingress controller, you can either update minikube, or deploy it yourself as shown in the next section. +You may want to consider [using the VM's docker +daemon](https://github.com/kubernetes/minikube/blob/master/README.md#reusing-the-docker-daemon) +when developing. + +### CoreOS Kubernetes + +[CoreOS Kubernetes](https://github.com/coreos/coreos-kubernetes/) repository has `Vagrantfile` +scripts to easily create a new Kubernetes cluster on VirtualBox, VMware or AWS. + +Follow the CoreOS [doc](https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant-single.html) +for detailed instructions. + ## Deploy the ingress controller You can deploy an ingress controller on the cluster setup in the previous step @@ -93,7 +105,7 @@ $ glbc --help pod secrets for creating a Kubernetes client. (default true) $ ./glbc --running-in-cluster=false -I1210 17:49:53.202149 27767 main.go:179] Starting GLBC image: glbc:0.8.0, cluster name +I1210 17:49:53.202149 27767 main.go:179] Starting GLBC image: glbc:0.9.2, cluster name ``` Note that this is equivalent to running the ingress controller on your local diff --git a/docs/faq/README.md b/docs/faq/README.md index 53810643d..865921581 100644 --- a/docs/faq/README.md +++ b/docs/faq/README.md @@ -23,7 +23,7 @@ Table of Contents The Kubernetes Service is an abstraction over endpoints (pod-ip:port pairings). The Ingress is an abstraction over Services. This doesn't mean all Ingress controller must route *through* a Service, but rather, that routing, security -and auth configuration is represented in the Ingerss resource per Service, and +and auth configuration is represented in the Ingress resource per Service, and not per pod. As long as this configuration is respected, a given Ingress controller is free to route to the DNS name of a Service, the VIP, a NodePort, or directly to the Service's endpoints. @@ -85,7 +85,7 @@ as well as in [this](/examples/pipeline) example. First check the [catalog](#is-there-a-catalog-of-existing-ingress-controllers), to make sure you really need to write one. -1. Write a [generic backend](https://github.com/kubernetes/ingress/blob/master/core/pkg/ingress/doc.go) +1. Write a [generic backend](/examples/custom-controller) 2. Keep it in your own repo, make sure it passes the [conformance suite](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/ingress_utils.go#L112) 3. Submit an example(s) in the appropriate subdirectories [here](/examples/README.md) 4. 
Add it to the catalog
@@ -100,7 +100,7 @@ Testing for the Ingress controllers is divided between:
 * Ingress repo: unittests and pre-submit integration tests run via travis
 * Kubernetes repo: [pre-submit e2e](https://k8s-testgrid.appspot.com/google-gce#gce&include-filter-by-regex=Loadbalancing),
   [post-merge e2e](https://k8s-testgrid.appspot.com/google-gce#gci-gce-ingress),
-  [per release-branch e2e](https://k8s-testgrid.appspot.com/google-gce#gci-gce-ingress-release-1.5)
+  [per release-branch e2e](https://k8s-testgrid.appspot.com/google-gce#gci-gce-ingress-1.5)
 
 The configuration for jenkins e2e tests are located [here](https://github.com/kubernetes/test-infra).
 The Ingress E2Es are located [here](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/ingress.go),
diff --git a/docs/faq/gce.md b/docs/faq/gce.md
index 7f29e9452..f42afd15d 100644
--- a/docs/faq/gce.md
+++ b/docs/faq/gce.md
@@ -23,6 +23,8 @@ Table of Contents
 * [How does Ingress work across 2 GCE clusters?](#how-does-ingress-work-across-2-gce-clusters)
 * [I shutdown a cluster without deleting all Ingresses, how do I manually cleanup?](#i-shutdown-a-cluster-without-deleting-all-ingresses-how-do-i-manually-cleanup)
 * [How do I disable the GCE Ingress controller?](#how-do-i-disable-the-gce-ingress-controller)
+* [What GCE resources are shared between Ingresses?](#what-gce-resources-are-shared-between-ingresses)
+* [How do I debug a controller spinloop?](#how-do-i-debug-a-controller-spinloop)
 
 ## How do I deploy an Ingress controller?
 
@@ -30,6 +32,9 @@ Table of Contents
 On GCP (either GCE or GKE), every Kubernetes cluster has an Ingress controller
 running on the master, no deployment necessary. You can deploy a second,
 different (i.e non-GCE) controller, like [this](README.md#how-do-i-deploy-an-ingress-controller).
+If you wish to deploy a GCE controller as a pod in your cluster, make sure to
+turn down the existing auto-deployed Ingress controller as shown in this
+[example](/examples/deployment/gce/).
 
 ## I created an Ingress and nothing happens, now what?
 
@@ -87,7 +92,24 @@ for how to request more.
 
 ## Why does the Ingress need a different instance group than the GKE cluster?
 
 The controller adds/removes Kubernetes nodes that are `NotReady` from the lb
-instance group.
+instance group. We cannot simply rely on health checks to achieve this for
+a few reasons.
+
+First, older Kubernetes versions (<=1.3) did not mark
+endpoints on unreachable nodes as NotReady. Meaning if the Kubelet didn't
+heartbeat for 10s, the node was marked NotReady, but there was no other signal
+at the Service level to stop routing requests to endpoints on that node. In
+later Kubernetes versions this is handled a little better: if the Kubelet
+doesn't heartbeat for 10s it's marked NotReady, and if it stays NotReady
+for 40s all endpoints are marked NotReady. So it is still advantageous
+to pull the node out of the GCE LB Instance Group in 10s, because we
+save 30s of bad requests.
+
+Second, continuing to send requests to NotReady nodes is not a great idea.
+The NotReady condition is an aggregate of various factors. For example,
+a NotReady node might still pass health checks but have the wrong
+nodePort to endpoint mappings. The health check will pass as long as *something*
+returns an HTTP 200.
 
 ## Why does the cloud console show 0/N healthy instances?
 
@@ -228,6 +250,17 @@ controller will inject the default-http-backend Service that runs in the
 `kube-system` namespace as the default backend for the GCE HTTP lb allocated
 for that Ingress resource.
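To make the first point above concrete, here is a minimal sketch of the kind of filtering the controller performs when it keeps only Ready nodes in the loadbalanced instance group. The helper name `readyNodeNames` is ours, not the controller's, and this is an illustration rather than the actual GCE controller code:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// readyNodeNames drops NotReady nodes, which is the set the controller
// would keep in the loadbalanced instance group.
func readyNodeNames(nodes []api.Node) []string {
	var ready []string
	for _, n := range nodes {
		for _, cond := range n.Status.Conditions {
			if cond.Type == api.NodeReady && cond.Status == api.ConditionTrue {
				ready = append(ready, n.Name)
				break
			}
		}
	}
	return ready
}

func main() {
	nodes := []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: "node-a"},
			Status: api.NodeStatus{Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			}},
		},
		{
			// A node whose Kubelet stopped heartbeating would look like this.
			ObjectMeta: api.ObjectMeta{Name: "node-b"},
			Status: api.NodeStatus{Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionFalse},
			}},
		},
	}
	fmt.Println(readyNodeNames(nodes)) // [node-a]
}
```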
+Some caveats concerning the default backend:
+
+* It is the only Backend Service that doesn't directly map to a user-specified
+NodePort Service
+* It's created when the first Ingress is created, and deleted when the last
+Ingress is deleted, since we don't want to waste quota if the user is not going
+to need L7 loadbalancing through Ingress
+* It has an HTTP health check pointing at `/healthz`, not the default `/`, because
+`/` serves a 404 by design
+
+
 ## How does Ingress work across 2 GCE clusters?
 
 See federation [documentation](http://kubernetes.io/docs/user-guide/federation/federated-ingress/).
@@ -259,4 +292,58 @@ $ gcloud container clusters create mycluster --network "default" --num-nodes 1 \
 --disk-size 50 --scopes storage-full
 ```
 
+## What GCE resources are shared between Ingresses?
+
+Every Ingress creates a pipeline of GCE cloud resources behind an IP. Some of
+these are shared between Ingresses out of necessity, while some are shared
+because there was no perceived need for duplication (all resources consume
+quota and usually cost money).
+
+Shared:
+
+* Backend Services: because of low quota and high reuse. A single Service in a
+Kubernetes cluster has one NodePort, common throughout the cluster. GCE has
+a hard limit on the number of allowed BackendServices, so if multiple Ingresses
+all point to a single Service, that creates a single BackendService in GCE
+pointing to that Service's NodePort.
+
+* Instance Group: since an instance can only be part of a single loadbalanced
+Instance Group, these must be shared. There is 1 Ingress Instance Group per
+zone containing Kubernetes nodes.
+
+* HTTP Health Checks: currently the HTTP health checks point at the NodePort
+of a BackendService. They don't *need* to be shared, but they are since
+BackendServices are shared.
+
+* Firewall rule: In a non-federated cluster there is a single firewall rule
+that covers HTTP health check traffic from the range of [GCE loadbalancer IPs](https://cloud.google.com/compute/docs/load-balancing/http/#troubleshooting)
+to Service nodePorts.
+
+Unique:
+
+Currently, a single Ingress on GCE creates a unique IP and URL map. In this
+model the following resources cannot be shared:
+* Url Map
+* Target HTTP(S) Proxies
+* SSL Certificates
+* Static-ip
+* Forwarding rules
+
+
+## How do I debug a controller spinloop?
+
+The most likely cause of a controller spinloop is some form of GCE validation
+failure, e.g.:
+* It's trying to delete a BackendService already in use, say in a UrlMap
+* It's trying to add an Instance to more than one loadbalanced InstanceGroup
+* It's trying to flip the loadbalancing algorithm on a BackendService to RATE,
+when some other BackendService is pointing at the same InstanceGroup and asking
+for UTILIZATION
+
+In all such cases, the work queue will put a single key (ingress namespace/name)
+that's getting continuously requeued into exponential backoff. However, currently
+the Informers that watch the Kubernetes API are set up to periodically resync,
+so even though a particular key is in backoff, we might end up syncing all other
+keys every, say, 10m, which might trigger the same validation-error-condition
+when syncing a shared resource.
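The requeue-with-exponential-backoff behavior described above is easy to illustrate. The following is not the controller's actual task queue implementation, just a self-contained sketch of the pattern it relies on, with invented names:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// requeueWithBackoff retries sync(key), doubling the delay after every
// failure, the way a controller work queue backs off a failing key.
func requeueWithBackoff(key string, sync func(string) error, maxRetries int) error {
	delay := 100 * time.Millisecond
	for i := 0; i < maxRetries; i++ {
		if err := sync(key); err == nil {
			return nil
		} else {
			fmt.Printf("sync %q failed (%v); requeueing in %v\n", key, err, delay)
			time.Sleep(delay)
			delay *= 2 // exponential backoff
		}
	}
	return errors.New("retries exhausted for key " + key)
}

func main() {
	attempts := 0
	// A sync function that fails twice before succeeding, e.g. while a GCE
	// validation error (a BackendService still referenced by a UrlMap) clears.
	sync := func(key string) error {
		attempts++
		if attempts < 3 {
			return errors.New("BackendService still in use")
		}
		return nil
	}
	if err := requeueWithBackoff("default/my-ingress", sync, 5); err != nil {
		fmt.Println(err)
	}
}
```

Note that a periodic resync, as described above, would re-enqueue the key outside this backoff loop, which is exactly why a shared-resource validation error can keep the controller spinning.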
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 6f474692d..ab12588bd 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -255,7 +255,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
         name: ingress-nginx
         imagePullPolicy: Always
         ports:
diff --git a/examples/PREREQUISITES.md b/examples/PREREQUISITES.md
index bbb066ee4..5881f9045 100644
--- a/examples/PREREQUISITES.md
+++ b/examples/PREREQUISITES.md
@@ -15,6 +15,10 @@ will need to create a firewall rule that targets port 80/443 on the specific VMs
the nginx controller is running on. On cloudproviders, the respective backend
will auto-create firewall rules for your Ingress.

+If you'd like to auto-create firewall rules for an OSS Ingress controller,
+you can put it behind a Service of `Type=LoadBalancer` as shown in
+[this example](/examples/static-ip/nginx#acquiring-an-ip).
+
## TLS certificates

Unless otherwise mentioned, the TLS secret used in examples is a 2048 bit RSA
@@ -23,8 +27,8 @@ key/cert pair with an arbitrarily chosen hostname, created as follows

```console
$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=nginxsvc/O=nginxsvc"
Generating a 2048 bit RSA private key
-......................................................................................................................................+++
-....................................................................+++
+................+++
+................+++
writing new private key to 'tls.key'
-----

@@ -32,9 +36,108 @@ $ kubectl create secret tls tls-secret --key tls.key --cert tls.crt
secret "tls-secret" created
```

+## CA Authentication
+You can act as your very own CA, or use an existing one. As a learning
+exercise, we're going to generate our own CA, and also generate a client certificate.
+
+These instructions are based on the CoreOS OpenSSL [instructions](https://coreos.com/kubernetes/docs/latest/openssl.html)
+
+### Generating a CA
+
+First of all, you have to generate a CA. This is the certificate that will sign your client certificates.
+In the real world, you may encounter CAs with intermediate certificates, as in the following:
+
+```console
+$ openssl s_client -connect www.google.com:443
+[...]
+---
+Certificate chain
+ 0 s:/C=US/ST=California/L=Mountain View/O=Google Inc/CN=www.google.com
+   i:/C=US/O=Google Inc/CN=Google Internet Authority G2
+ 1 s:/C=US/O=Google Inc/CN=Google Internet Authority G2
+   i:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA
+ 2 s:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA
+   i:/C=US/O=Equifax/OU=Equifax Secure Certificate Authority
+
+```
+
+To generate our CA certificate, we have to run the following commands:
+
+```console
+$ openssl genrsa -out ca.key 2048
+$ openssl req -x509 -new -nodes -key ca.key -days 10000 -out ca.crt -subj "/CN=example-ca"
+```
+
+This will generate two files: a private key (ca.key) and a public key (ca.crt). This CA is valid for 10000 days.
+The ca.crt can be used later when creating the CA authentication secret.
+
+### Generating the client certificate
+The following steps generate a client certificate signed by the CA generated above. This certificate can be
+used to authenticate against a tls-auth configured Ingress.
+
+First, we need to generate an 'openssl.cnf' file that will be used while signing the keys:
+
+```
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+```
+
+Then, each user generates their own private key (which must be kept secret)
+and a CSR (Certificate Signing Request) that will be sent to the CA to be signed, generating a certificate.
+
+```console
+$ openssl genrsa -out client1.key 2048
+$ openssl req -new -key client1.key -out client1.csr -subj "/CN=client1" -config openssl.cnf
+```
+
+Once the CA receives the generated 'client1.csr' file, it signs it and generates a client1.crt certificate:
+
+```console
+$ openssl x509 -req -in client1.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client1.crt -days 365 -extensions v3_req -extfile openssl.cnf
+```
+
+Then, you'll have 3 files: client1.key (the user's private key), client1.crt (the user's public key) and client1.csr (a disposable CSR).
+
+
+### Creating the CA Authentication secret
+If you're using the CA Authentication feature, you need to generate a secret containing
+all the authorized CAs. You must download them from your CA site in PEM format (like the following):
+
+```
+-----BEGIN CERTIFICATE-----
+[....]
+-----END CERTIFICATE-----
+```
+
+You can have as many certificates as you want. If they're in the binary DER format,
+you can convert them as follows:
+
+```console
+$ openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem
+```
+
+Then, you have to concatenate them all into a single file named 'ca.crt', as follows:
+
+
+```console
+$ cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt
+```
+
+The final step is to create a secret with the content of this file. This secret is going to be used in
+the TLS Auth directive:
+
+```console
+$ kubectl create secret generic caingress --namespace=default --from-file=ca.crt
+```
+
 ## Test HTTP Service

-All examples that require a test HTTP Service use the standard echoheaders pod,
+All examples that require a test HTTP Service use the standard http-svc pod,
 which you can deploy as follows

 ```console
@@ -43,35 +146,35 @@ service "http-svc" created
 replicationcontroller "http-svc" created

 $ kubectl get po
-NAME                READY     STATUS    RESTARTS   AGE
-echoheaders-p1t3t   1/1       Running   0          1d
+NAME                READY     STATUS    RESTARTS   AGE
+http-svc-p1t3t      1/1       Running   0          1d

 $ kubectl get svc
-NAME          CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
-echoheaders   10.0.122.116   <none>        80/TCP    1d
+NAME       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
+http-svc   10.0.122.116   <none>        80:30301/TCP   1d
 ```

 You can test that the HTTP Service works by exposing it temporarily

 ```console
-$ kubectl patch svc echoheaders -p '{"spec":{"type": "LoadBalancer"}}'
-"echoheaders" patched
+$ kubectl patch svc http-svc -p '{"spec":{"type": "LoadBalancer"}}'
+"http-svc" patched

-$ kubectl get svc echoheaders
-NAME          CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
-echoheaders   10.0.122.116   <none>        80:32100/TCP   1d
+$ kubectl get svc http-svc
+NAME       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
+http-svc   10.0.122.116   <none>        80:30301/TCP   1d

-$ kubectl describe svc echoheaders
-Name:			echoheaders
-Namespace:		default
-Labels:			app=echoheaders
-Selector:		app=echoheaders
-Type:			LoadBalancer
-IP:			10.0.122.116
+$ kubectl describe svc http-svc
+Name:			http-svc
+Namespace:		default
+Labels:			app=http-svc
+Selector:		app=http-svc
+Type:			LoadBalancer
+IP:			10.0.122.116
 LoadBalancer Ingress:	108.59.87.136
-Port:			http	80/TCP
-NodePort:		http	32100/TCP
-Endpoints:		10.180.1.6:8080
-Session Affinity:	None
+Port:			http	80/TCP
+NodePort:		http	30301/TCP
+Endpoints:		10.180.1.6:8080
+Session Affinity:	None
 Events:
   FirstSeen	LastSeen	Count	From	SubObjectPath	Type	Reason	Message
   ---------	--------	-----	----	-------------	--------	------	-------
@@ -98,8 +201,8 @@ user-agent=curl/7.46.0
 BODY:
 -no body in request-

-$ kubectl patch svc echoheaders -p '{"spec":{"type": "NodePort"}}'
-"echoheaders" patched
+$ kubectl patch svc http-svc -p '{"spec":{"type": "NodePort"}}'
+"http-svc" patched
 ```

 ## Ingress Class
diff --git a/examples/README.md b/examples/README.md
index 2a00a24d7..69288dd45 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,7 +1,7 @@
 # Ingress examples

 This directory contains a catalog of examples on how to run, configure and
-scale Ingress. Please review the [prerequisities](prerequisites.md) before
+scale Ingress. Please review the [prerequisites](PREREQUISITES.md) before
 trying them.
## Basic cross platform
@@ -57,7 +57,7 @@ SNI + TCP | TLS routing based on SNI hostname | nginx | Advanced
 Name | Description | Platform | Complexity Level
-----| ----------- | ---------- | ----------------
Basic auth | password protect your website | nginx | Intermediate
-External auth plugin | defer to an external auth service | nginx | Intermediate
+[External auth plugin](external-auth/nginx/README.md) | defer to an external auth service | nginx | Intermediate

## Protocols

@@ -69,4 +69,15 @@ Websockets | websockets loadbalancing | nginx | Intermediate
HTTP/2 | HTTP/2 loadbalancing | * | Intermediate
Proxy protocol | leverage the proxy protocol for source IP | nginx | Advanced

+## Custom controllers
+Name | Description | Platform | Complexity Level
+-----| ----------- | ---------- | ----------------
+Dummy | A simple dummy controller that logs updates | * | Advanced
+
+## Customization
+
+Name | Description | Platform | Complexity Level
+-----| ----------- | ---------- | ----------------
+custom-headers | set custom headers before sending traffic to backends | nginx | Advanced
+configuration-snippets | customize nginx location configuration using annotations | nginx | Advanced
diff --git a/examples/affinity/cookie/nginx/README.md b/examples/affinity/cookie/nginx/README.md
new file mode 100644
index 000000000..51aeec310
--- /dev/null
+++ b/examples/affinity/cookie/nginx/README.md
@@ -0,0 +1,77 @@
+# Sticky Session
+
+This example demonstrates how to achieve session affinity using cookies.
+
+## Prerequisites
+
+You will need to make sure your Ingress targets exactly one Ingress
+controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class),
+and that you have an ingress controller [running](/examples/deployment) in your cluster.
+
+You will also need to deploy multiple replicas of your application that show up as endpoints for the Service referenced in the Ingress object, to test session stickiness.
+Using a deployment with only one replica doesn't set the 'sticky' cookie.
+
+## Deployment
+
+Session stickiness is achieved through 3 annotations on the Ingress, as shown in the [example](sticky-ingress.yaml).
+
+|Name|Description|Values|
+| --- | --- | --- |
+|ingress.kubernetes.io/affinity|Sets the affinity type|string (in NGINX only ``cookie`` is possible)|
+|ingress.kubernetes.io/session-cookie-name|Name of the cookie that will be used|string (defaults to `route`)|
+|ingress.kubernetes.io/session-cookie-hash|Type of hash that will be used in the cookie value|sha1/md5/index|
+
+You can create the Ingress to test this:
+
+```console
+$ kubectl create -f sticky-ingress.yaml
+```
+
+## Validation
+
+You can confirm that the Ingress works.
+
+```console
+$ kubectl describe ing nginx-test
+Name:			nginx-test
+Namespace:		default
+Address:
+Default backend:	default-http-backend:80 (10.180.0.4:8080,10.240.0.2:8080)
+Rules:
+  Host	Path	Backends
+  ----	----	--------
+  stickyingress.example.com
+    	/	nginx-service:80 ()
+Annotations:
+  affinity:	cookie
+  session-cookie-hash:	sha1
+  session-cookie-name:	route
+Events:
+  FirstSeen	LastSeen	Count	From	SubObjectPath	Type	Reason	Message
+  ---------	--------	-----	----	-------------	--------	------	-------
+  7s	7s	1	{nginx-ingress-controller }	Normal	CREATE	default/nginx-test
+
+
+$ curl -I http://stickyingress.example.com
+HTTP/1.1 200 OK
+Server: nginx/1.11.9
+Date: Fri, 10 Feb 2017 14:11:12 GMT
+Content-Type: text/html
+Content-Length: 612
+Connection: keep-alive
+Set-Cookie: route=a9907b79b248140b56bb13723f72b67697baac3d; Path=/; HttpOnly
+Last-Modified: Tue, 24 Jan 2017 14:02:19 GMT
+ETag: "58875e6b-264"
+Accept-Ranges: bytes
+```
+In the example above, you can see the 'Set-Cookie: route' line setting the defined stickiness cookie.
+This cookie is created by NGINX and contains the hash of the upstream used for that request.
+If the user changes this cookie, NGINX creates a new one and redirects the user to another upstream.
+
+If the backend pool grows, NGINX will keep sending requests to the server that handled the first request, even if it's overloaded.
+
+When the backend server is removed, requests are re-routed to another upstream server and NGINX creates a new cookie, as the previous hash becomes invalid.
+
+When more than one Ingress object points to the same Service, but only one of them configures affinity, the first created Ingress will be used.
+This means you can configure Session Affinity in one Ingress and not see it reflected in the NGINX configuration, because another Ingress object pointing to the same Service doesn't configure it.
+
diff --git a/examples/affinity/cookie/nginx/sticky-ingress.yaml b/examples/affinity/cookie/nginx/sticky-ingress.yaml
new file mode 100644
index 000000000..69beea75e
--- /dev/null
+++ b/examples/affinity/cookie/nginx/sticky-ingress.yaml
@@ -0,0 +1,19 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: nginx-test
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    ingress.kubernetes.io/affinity: "cookie"
+    ingress.kubernetes.io/session-cookie-name: "route"
+    ingress.kubernetes.io/session-cookie-hash: "sha1"
+
+spec:
+  rules:
+  - host: stickyingress.example.com
+    http:
+      paths:
+      - backend:
+          serviceName: nginx-service
+          servicePort: 80
+        path: /
diff --git a/examples/auth/basic/haproxy/README.md b/examples/auth/basic/haproxy/README.md
new file mode 100644
index 000000000..5c5edc78f
--- /dev/null
+++ b/examples/auth/basic/haproxy/README.md
@@ -0,0 +1,103 @@
+# HAProxy Ingress Basic Authentication
+
+This example demonstrates how to configure
+[Basic Authentication](https://tools.ietf.org/html/rfc2617) on the
+HAProxy Ingress controller.
+
+## Prerequisites
+
+This document has the following prerequisites:
+
+* Deploy the [HAProxy Ingress controller](/examples/deployment/haproxy); you should
+end up with a controller, a sample web app and an ingress resource for the `foo.bar`
+domain
+* This feature is not in the stable version; use the `canary` tag
+
+As mentioned in the deployment instructions, you MUST turn down any existing
+ingress controllers before running HAProxy Ingress.
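+
+One quick way to check for other running controllers before deploying is a
+sketch like the following (the grep pattern is illustrative; the output depends
+on how any other controller was deployed):
+
+```console
+# Look for pods and deployments that resemble ingress controllers:
+$ kubectl get pods --all-namespaces | grep -i ingress
+$ kubectl get deployments --all-namespaces | grep -i ingress
+```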
+
+## Using Basic Authentication
+
+HAProxy Ingress reads users and passwords from an `auth` file stored on secrets, one user
+and password per line. Secret name, realm and type are configured with annotations
+in the ingress resource:
+
+* `ingress.kubernetes.io/auth-type`: the only supported type is `basic`
+* `ingress.kubernetes.io/auth-realm`: an optional string with the authentication realm
+* `ingress.kubernetes.io/auth-secret`: name of the secret
+
+Each line of the `auth` file should have:
+
+* user and insecure password separated with a pair of colons: `<user>::<insecure-passwd>`; or
+* user and an encrypted password separated with colons: `<user>:<encrypted-passwd>`
+
+HAProxy evaluates encrypted passwords with the
+[crypt](http://man7.org/linux/man-pages/man3/crypt.3.html) function. Use `mkpasswd` or
+`makepasswd` to create them. `mkpasswd` can be found in the Alpine Linux container.
+
+## Configure
+
+Create a secret for our users:
+
+* `john` and password `admin` using an insecure plain text password
+* `jane` and password `guest` using an encrypted password
+
+```console
+$ mkpasswd -m des ## a short, des encryption, syntax from Busybox on Alpine Linux
+Password: (type 'guest' and press Enter)
+E5BrlrQ5IXYK2
+
+$ cat >auth <<EOF
+john::admin
+jane:E5BrlrQ5IXYK2
+EOF
+
+$ kubectl create secret generic mypasswd --from-file auth
+secret "mypasswd" created
+```
+
+Annotate the ingress resource created in the deployment instructions, referencing the secret:
+
+```console
+$ kubectl annotate ingress/app \
+    ingress.kubernetes.io/auth-type=basic \
+    ingress.kubernetes.io/auth-realm="my server" \
+    ingress.kubernetes.io/auth-secret=mypasswd
+```
+
+Test without providing a user and password (expect a 401):
+
+```console
+$ curl -i 172.17.4.99:30876 -H 'Host: foo.bar'
+HTTP/1.0 401 Unauthorized
+Content-Type: text/html
+
+<html><body><h1>401 Unauthorized</h1>
+You need a valid user and password to access this content.
+</body></html>
+```
+
+Send a valid user:
+
+```console
+$ curl -i -u 'john:admin' 172.17.4.99:30876 -H 'Host: foo.bar'
+HTTP/1.1 200 OK
+Server: nginx/1.9.11
+Date: Sun, 05 Mar 2017 19:22:33 GMT
+Content-Type: text/plain
+Transfer-Encoding: chunked
+
+CLIENT VALUES:
+client_address=10.2.18.5
+command=GET
+real path=/
+query=nil
+request_version=1.1
+request_uri=http://foo.bar:8080/
+```
+
+Using the `jane:guest` user/password should produce the same output.
+
diff --git a/examples/auth/client-certs/nginx/README.md b/examples/auth/client-certs/nginx/README.md
new file mode 100644
index 000000000..fed88598a
--- /dev/null
+++ b/examples/auth/client-certs/nginx/README.md
@@ -0,0 +1,86 @@
+# TLS authentication
+
+This example demonstrates how to enable TLS Authentication through the nginx Ingress controller.
+
+## Terminology
+
+* CA: Certificate authority signing the client cert; in this example we will play the role of a CA.
+You can generate a CA cert as shown in this doc.
+
+* CA Certificate(s) - Certificate Authority public key. Client certs must chain back to this cert,
+meaning the Issuer field of some certificate in the chain leading up to the client cert must contain
+the name of this CA. For the purposes of this example, this is a self-signed certificate.
+
+* CA chains: A chain of certificates where the parent has a Subject field matching the Issuer field of
+the child, except for the root, which has Issuer == Subject.
+
+* Client Cert: Certificate used by the clients to authenticate themselves with the loadbalancer/backends.
+
+
+## Prerequisites
+
+You need a valid CA file, composed of a group of valid, enabled CAs. This MUST be in PEM format.
+The instructions are described [here](../../../PREREQUISITES.md#ca-authentication)
+
+Your Ingress must also be configured as an HTTPS/TLS Ingress.
+
+## Deployment
+
+Certificate Authentication is achieved through 2 annotations on the Ingress, as shown in the [example](nginx-tls-auth.yaml).
+
+|Name|Description|Values|
+| --- | --- | --- |
+|ingress.kubernetes.io/auth-tls-secret|Sets the secret that contains the authorized CA chain|string|
+|ingress.kubernetes.io/auth-tls-verify-depth|The verification depth Certificate Authentication will make|number (defaults to 1)|
+
+
+The following command instructs the controller to enable TLS authentication using the secret from the ``ingress.kubernetes.io/auth-tls-secret``
+annotation on the Ingress. Clients must present this cert to the loadbalancer, or they will receive an HTTP 400 response.
+
+```console
+$ kubectl create -f nginx-tls-auth.yaml
+```
+
+## Validation
+
+You can confirm that the Ingress works.
+
+```console
+$ kubectl describe ing nginx-test
+Name:			nginx-test
+Namespace:		default
+Address:		104.198.183.6
+Default backend:	default-http-backend:80 (10.180.0.4:8080,10.240.0.2:8080)
+TLS:
+  tls-secret terminates ingress.test.com
+Rules:
+  Host	Path	Backends
+  ----	----	--------
+  *
+    		http-svc:80 ()
+Annotations:
+  auth-tls-secret:	default/caingress
+  auth-tls-verify-depth:	3
+
+Events:
+  FirstSeen	LastSeen	Count	From	SubObjectPath	Type	Reason	Message
+  ---------	--------	-----	----	-------------	--------	------	-------
+  7s	7s	1	{nginx-ingress-controller }	Normal	CREATE	default/nginx-test
+  7s	7s	1	{nginx-ingress-controller }	Normal	UPDATE	default/nginx-test
+  7s	7s	1	{nginx-ingress-controller }	Normal	CREATE	ip: 104.198.183.6
+  7s	7s	1	{nginx-ingress-controller }	Warning	MAPPING	Ingress rule 'default/nginx-test' contains no path definition. Assuming /
+
+
+$ curl -k https://ingress.test.com
+HTTP/1.1 400 Bad Request
+Server: nginx/1.11.9
+
+$ curl -I -k --key ~/user.key --cert ~/user.cer https://ingress.test.com
+HTTP/1.1 200 OK
+Server: nginx/1.11.9
+
+```
+
+You must use the full DNS name while testing, as NGINX relies on the Server Name Indication (SNI) to select the correct Ingress to be used.
+
+The curl version used here was ``curl 7.47.0``
diff --git a/examples/auth/client-certs/nginx/nginx-tls-auth.yaml b/examples/auth/client-certs/nginx/nginx-tls-auth.yaml
new file mode 100644
index 000000000..23cac7b49
--- /dev/null
+++ b/examples/auth/client-certs/nginx/nginx-tls-auth.yaml
@@ -0,0 +1,25 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    # Create this with kubectl create secret generic caingress --from-file=ca.crt --namespace=default
+    ingress.kubernetes.io/auth-tls-secret: "default/caingress"
+    ingress.kubernetes.io/auth-tls-verify-depth: "3"
+    kubernetes.io/ingress.class: "nginx"
+  name: nginx-test
+  namespace: default
+spec:
+  rules:
+  - host: ingress.test.com
+    http:
+      paths:
+      - backend:
+          serviceName: http-svc
+          servicePort: 80
+        path: /
+  tls:
+  - hosts:
+    - ingress.test.com
+    # Create this cert as described in the 'multi-tls' example
+    secretName: cert
+
diff --git a/examples/auth/external-auth/nginx/README.md b/examples/auth/external-auth/nginx/README.md
new file mode 100644
index 000000000..db522c1d2
--- /dev/null
+++ b/examples/auth/external-auth/nginx/README.md
@@ -0,0 +1,148 @@
+# External authentication
+
+### Example 1:
+
+Use an external service (Basic Auth) located at `https://httpbin.org`
+
+```
+$ kubectl create -f ingress.yaml
+ingress "external-auth" created
+$ kubectl get ing external-auth
+NAME            HOSTS                         ADDRESS       PORTS     AGE
+external-auth   external-auth-01.sample.com   172.17.4.99   80        13s
+$ kubectl get ing external-auth -o yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    ingress.kubernetes.io/auth-url: https://httpbin.org/basic-auth/user/passwd
+  creationTimestamp: 2016-10-03T13:50:35Z
+  generation: 1
+  name: external-auth
+  namespace: default
+  resourceVersion: "2068378"
+  selfLink: /apis/extensions/v1beta1/namespaces/default/ingresses/external-auth
+  uid: 5c388f1d-8970-11e6-9004-080027d2dc94
+spec:
+  rules:
+  - host: external-auth-01.sample.com
+    http:
+      paths:
+      - backend:
+          serviceName: echoheaders
+          servicePort: 80
+        path: /
+status:
+  loadBalancer:
+    ingress:
+    - ip: 172.17.4.99
+$
+```
+
+Test 1: no username/password (expect code 401)
+```
+$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com'
+* Rebuilt URL to: http://172.17.4.99/
+*   Trying 172.17.4.99...
+* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
+> GET / HTTP/1.1
+> Host: external-auth-01.sample.com
+> User-Agent: curl/7.50.1
+> Accept: */*
+>
+< HTTP/1.1 401 Unauthorized
+< Server: nginx/1.11.3
+< Date: Mon, 03 Oct 2016 14:52:08 GMT
+< Content-Type: text/html
+< Content-Length: 195
+< Connection: keep-alive
+< WWW-Authenticate: Basic realm="Fake Realm"
+<
+<html>
+<head><title>401 Authorization Required</title></head>
+<body bgcolor="white">
+<center><h1>401 Authorization Required</h1></center>
+<hr><center>nginx/1.11.3</center>
+</body>
+</html>
+
+* Connection #0 to host 172.17.4.99 left intact
+```
+
+Test 2: valid username/password (expect code 200)
+```
+$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:passwd'
+* Rebuilt URL to: http://172.17.4.99/
+*   Trying 172.17.4.99...
+* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
+* Server auth using Basic with user 'user'
+> GET / HTTP/1.1
+> Host: external-auth-01.sample.com
+> Authorization: Basic dXNlcjpwYXNzd2Q=
+> User-Agent: curl/7.50.1
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Server: nginx/1.11.3
+< Date: Mon, 03 Oct 2016 14:52:50 GMT
+< Content-Type: text/plain
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+<
+CLIENT VALUES:
+client_address=10.2.60.2
+command=GET
+real path=/
+query=nil
+request_version=1.1
+request_uri=http://external-auth-01.sample.com:8080/
+
+SERVER VALUES:
+server_version=nginx: 1.9.11 - lua: 10001
+
+HEADERS RECEIVED:
+accept=*/*
+authorization=Basic dXNlcjpwYXNzd2Q=
+connection=close
+host=external-auth-01.sample.com
+user-agent=curl/7.50.1
+x-forwarded-for=10.2.60.1
+x-forwarded-host=external-auth-01.sample.com
+x-forwarded-port=80
+x-forwarded-proto=http
+x-real-ip=10.2.60.1
+BODY:
+* Connection #0 to host 172.17.4.99 left intact
+-no body in request-
+```
+
+Test 3: invalid username/password (expect code 401)
+```
+curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:user'
+* Rebuilt URL to: http://172.17.4.99/
+*   Trying 172.17.4.99...
+* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
+* Server auth using Basic with user 'user'
+> GET / HTTP/1.1
+> Host: external-auth-01.sample.com
+> Authorization: Basic dXNlcjp1c2Vy
+> User-Agent: curl/7.50.1
+> Accept: */*
+>
+< HTTP/1.1 401 Unauthorized
+< Server: nginx/1.11.3
+< Date: Mon, 03 Oct 2016 14:53:04 GMT
+< Content-Type: text/html
+< Content-Length: 195
+< Connection: keep-alive
+* Authentication problem. Ignoring this.
+< WWW-Authenticate: Basic realm="Fake Realm"
+<
+<html>
+<head><title>401 Authorization Required</title></head>
+<body bgcolor="white">
+<center><h1>401 Authorization Required</h1></center>
+<hr><center>nginx/1.11.3</center>
+</body>
+</html>
+
+* Connection #0 to host 172.17.4.99 left intact
+```
diff --git a/examples/auth/external-auth/nginx/ingress.yaml b/examples/auth/external-auth/nginx/ingress.yaml
new file mode 100644
index 000000000..1cf779ce2
--- /dev/null
+++ b/examples/auth/external-auth/nginx/ingress.yaml
@@ -0,0 +1,15 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  annotations:
+    ingress.kubernetes.io/auth-url: "https://httpbin.org/basic-auth/user/passwd"
+  name: external-auth
+spec:
+  rules:
+  - host: external-auth-01.sample.com
+    http:
+      paths:
+      - backend:
+          serviceName: echoheaders
+          servicePort: 80
+        path: /
\ No newline at end of file
diff --git a/examples/custom-controller/Dockerfile b/examples/custom-controller/Dockerfile
new file mode 100644
index 000000000..197ea92f7
--- /dev/null
+++ b/examples/custom-controller/Dockerfile
@@ -0,0 +1,20 @@
+# Copyright 2017 The Kubernetes Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(ingress#191): Change this to something more appropriate, like busybox
+FROM ubuntu:15.10
+MAINTAINER Prashanth B
+RUN apt-get update && apt-get install ssl-cert -y
+COPY server /
+ENTRYPOINT ["/server"]
diff --git a/examples/custom-controller/Makefile b/examples/custom-controller/Makefile
new file mode 100644
index 000000000..2e6783689
--- /dev/null
+++ b/examples/custom-controller/Makefile
@@ -0,0 +1,39 @@
+# Copyright 2017 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the default backend binary or image for amd64, arm, arm64 and ppc64le
+#
+# Usage:
+# 	[PREFIX=gcr.io/google_containers/dummy-ingress-controller] [ARCH=amd64] [TAG=1.1] make (server|container|push)
+
+all: push
+
+TAG=0.1
+PREFIX?=bprashanth/dummy-ingress-controller
+ARCH?=amd64
+GOLANG_VERSION=1.6
+TEMP_DIR:=$(shell mktemp -d)
+
+server: server.go
+	CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) GOARM=6 godep go build -a -installsuffix cgo -ldflags '-w' -o server ./server.go
+
+container: server
+	docker build --pull -t $(PREFIX)-$(ARCH):$(TAG) .
+
+push: container
+	gcloud docker -- push $(PREFIX)-$(ARCH):$(TAG)
+
+clean:
+	rm -f server
+
diff --git a/examples/custom-controller/README.md b/examples/custom-controller/README.md
new file mode 100644
index 000000000..f420ae780
--- /dev/null
+++ b/examples/custom-controller/README.md
@@ -0,0 +1,29 @@
+# Dummy controller
+
+This example contains the source code of a simple dummy controller.
+If you want more details on the interface, or what the generic controller is
+actually doing, please read [this doc](/docs/dev/devel.md). You can deploy the
+controller as follows:
+
+```console
+$ kubectl create -f deployment.yaml
+service "default-backend" created
+deployment "dummy-ingress-controller" created
+
+$ kubectl get po
+NAME                                        READY     STATUS    RESTARTS   AGE
+dummy-ingress-controller-3685541482-082nl   1/1       Running   0          10m
+
+$ kubectl logs dummy-ingress-controller-3685541482-082nl
+I0131 02:29:02.462123       1 launch.go:92] &{dummy 0.0.0 git-00000000 git://foo.bar.com}
+I0131 02:29:02.462513       1 launch.go:221] Creating API server client for https://10.0.0.1:443
+I0131 02:29:02.494571       1 launch.go:111] validated default/default-backend as the default backend
+I0131 02:29:02.503180       1 controller.go:1038] starting Ingress controller
+I0131 02:29:02.513528       1 leaderelection.go:247] lock is held by dummy-ingress-controller-3685541482-50jh0 and has not yet expired
+W0131 02:29:03.510699       1 queue.go:87] requeuing kube-system/kube-scheduler, err deferring sync till endpoints controller has synced
+W0131 02:29:03.514445       1 queue.go:87] requeuing kube-system/node-controller-token-826dl, err deferring sync till endpoints controller has synced
+2017/01/31 02:29:12 Received OnUpdate notification
+2017/01/31 02:29:12 upstream-default-backend: 10.180.1.20
+```
+
+
diff --git a/examples/custom-controller/deployment.yaml b/examples/custom-controller/deployment.yaml
new file mode 100644
index 000000000..cf924b7f7
--- /dev/null
+++ b/examples/custom-controller/deployment.yaml
@@ -0,0 +1,51 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: default-backend
+  namespace: default
+  labels:
+    name: default-backend
+    app: dummy-ingress-controller
+spec:
+  ports:
+  - port: 80
+    targetPort: 10254
+  selector:
+    # Point back to the dummy controller's
+    # healthz port
+    app: dummy-ingress-controller
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: dummy-ingress-controller
+  namespace: default
+  labels:
+    app: dummy-ingress-controller
+spec:
+  selector:
+    matchLabels:
+      app: dummy-ingress-controller
+  template:
+    metadata:
+      labels:
+        app: dummy-ingress-controller
+    spec:
+      containers:
+      - name: server
+        image: bprashanth/dummy-ingress-controller-amd64:0.1
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 10254
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - /server
+        - --default-backend-service=$(POD_NAMESPACE)/default-backend
diff --git a/examples/custom-controller/server.go b/examples/custom-controller/server.go
new file mode 100644
index 000000000..5c6eea4b7
--- /dev/null
+++ b/examples/custom-controller/server.go
@@ -0,0 +1,91 @@
+package main
+
+import (
+	"log"
+	"net/http"
+	"os/exec"
+	"strings"
+
+	"github.com/spf13/pflag"
+
+	nginxconfig "k8s.io/ingress/controllers/nginx/pkg/config"
+	"k8s.io/ingress/core/pkg/ingress"
+	"k8s.io/ingress/core/pkg/ingress/controller"
+	"k8s.io/ingress/core/pkg/ingress/defaults"
+	"k8s.io/kubernetes/pkg/api"
+)
+
+func main() {
+	dc := newDummyController()
+	ic := controller.NewIngressController(dc)
+	defer func() {
+		log.Printf("Shutting down ingress controller...")
+		ic.Stop()
+	}()
+	ic.Start()
+}
+
+func newDummyController() ingress.Controller {
+	return &DummyController{}
+}
+
+type DummyController struct{}
+
+func (dc DummyController) SetConfig(cfgMap *api.ConfigMap) {
+	log.Printf("Config map %+v", cfgMap)
+}
+
+func (dc DummyController) Reload(data []byte) ([]byte, bool, error) {
+	out, err := exec.Command("echo", string(data)).CombinedOutput()
+	if err != nil {
+		return out, false, err
+	}
+	log.Printf("Reloaded new config %s", out)
+	return out, true, err
+}
+
+func (dc DummyController) Test(file string) *exec.Cmd {
+	return exec.Command("echo", file)
+}
+
+func (dc DummyController) OnUpdate(updatePayload ingress.Configuration) ([]byte, error) {
+	log.Printf("Received OnUpdate notification")
+	for _, b := range updatePayload.Backends {
+		eps := []string{}
+		for _, e := range b.Endpoints {
+			eps = append(eps, e.Address)
+		}
+		log.Printf("%v: %v", b.Name, strings.Join(eps, ", "))
+	}
+	return []byte(``), nil
+}
+
+func (dc DummyController) BackendDefaults() defaults.Backend {
+	// Just adopt nginx's default backend config
+	return nginxconfig.NewDefault().Backend
+}
+
+func (n DummyController) Name() string {
+	return "dummy Controller"
+}
+
+func (n DummyController) Check(_ *http.Request) error {
+	return nil
+}
+
+func (dc DummyController) Info() *ingress.BackendInfo {
+	return &ingress.BackendInfo{
+		Name:       "dummy",
+		Release:    "0.0.0",
+		Build:      "git-00000000",
+		Repository: "git://foo.bar.com",
+	}
+}
+
+func (n DummyController) OverrideFlags(*pflag.FlagSet) {
+}
+
+func (n DummyController) SetListers(lister ingress.StoreLister) {
+
+}
diff --git a/examples/customization/configuration-snippets/nginx/README.md b/examples/customization/configuration-snippets/nginx/README.md
new file mode 100644
index 000000000..9adfa78a0
--- /dev/null
+++ b/examples/customization/configuration-snippets/nginx/README.md
@@ -0,0 +1,44 @@
+# Deploying the Nginx Ingress controller
+
+This example aims to demonstrate the deployment of an nginx ingress controller and,
+with the use of an annotation in the Ingress rule, the ability to customize the nginx
+configuration.
+
+## Default Backend
+
+The default backend is a Service capable of handling all url paths and hosts that the
+nginx controller doesn't understand. This most basic implementation just returns
+a 404 page:
+
+```console
+$ kubectl apply -f default-backend.yaml
+deployment "default-http-backend" created
+service "default-http-backend" created
+
+$ kubectl -n kube-system get po
+NAME                                    READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-qgwdd   1/1       Running   0          28s
+```
+
+Then create the ConfigMap the controller reads its configuration from:
+
+```console
+$ kubectl create -f nginx-load-balancer-conf.yaml
+```
+
+## Controller
+
+You can deploy the controller as follows:
+
+```console
+$ kubectl apply -f nginx-ingress-controller.yaml
+deployment "nginx-ingress-controller" created
+
+$ kubectl -n kube-system get po
+NAME                                       READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-qgwdd      1/1       Running   0          2m
+nginx-ingress-controller-873061567-4n3k2   1/1       Running   0          42s
+```
+
+## Test
+
+Check that the contents of the annotation are present in the nginx.conf file using:
+`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf`
diff --git a/examples/customization/configuration-snippets/nginx/default-backend.yaml b/examples/customization/configuration-snippets/nginx/default-backend.yaml
new file mode 100644
index 000000000..3c40989a3
--- /dev/null
+++ b/examples/customization/configuration-snippets/nginx/default-backend.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: default-http-backend
+  labels:
+    k8s-app: default-http-backend
+  namespace: kube-system
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: default-http-backend
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - name: default-http-backend
+        # Any image is permissible as long as:
+        # 1. It serves a 404 page at /
+        # 2. It serves 200 on a /healthz endpoint
+        image: gcr.io/google_containers/defaultbackend:1.0
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 8080
+        resources:
+          limits:
+            cpu: 10m
+            memory: 20Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: default-http-backend
+  namespace: kube-system
+  labels:
+    k8s-app: default-http-backend
+spec:
+  ports:
+  - port: 80
+    targetPort: 8080
+  selector:
+    k8s-app: default-http-backend
diff --git a/examples/customization/configuration-snippets/nginx/ingress.yaml b/examples/customization/configuration-snippets/nginx/ingress.yaml
new file mode 100644
index 000000000..e60d75f90
--- /dev/null
+++ b/examples/customization/configuration-snippets/nginx/ingress.yaml
@@ -0,0 +1,18 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: nginx-configuration-snippet
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    ingress.kubernetes.io/configuration-snippet: |
+      more_set_headers "Request-Id: $request_id";
+
+spec:
+  rules:
+  - host: custom.configuration.com
+    http:
+      paths:
+      - backend:
+          serviceName: http-svc
+          servicePort: 80
+        path: /
diff --git a/examples/customization/configuration-snippets/nginx/nginx-ingress-controller.yaml b/examples/customization/configuration-snippets/nginx/nginx-ingress-controller.yaml
new file mode 100644
index 000000000..c4065804a
--- /dev/null
+++ b/examples/customization/configuration-snippets/nginx/nginx-ingress-controller.yaml
@@ -0,0 +1,53 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nginx-ingress-controller
+  labels:
+    k8s-app: nginx-ingress-controller
+  namespace: kube-system
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-ingress-controller
+    spec:
+      # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
+      # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
+      # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
+      # like with kubeadm
+      # hostNetwork: true
+      terminationGracePeriodSeconds: 60
+      containers:
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
+        name: nginx-ingress-controller
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          timeoutSeconds: 1
+        ports:
+        - containerPort: 80
+          hostPort: 80
+        - containerPort: 443
+          hostPort: 443
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf
diff --git a/examples/customization/configuration-snippets/nginx/nginx-load-balancer-conf.yaml b/examples/customization/configuration-snippets/nginx/nginx-load-balancer-conf.yaml
new file mode 100644
index 000000000..239918267
--- /dev/null
+++ b/examples/customization/configuration-snippets/nginx/nginx-load-balancer-conf.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+data:
+  proxy-set-headers: "kube-system/custom-headers"
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-conf
+  namespace: kube-system
diff --git a/examples/customization/custom-errors/nginx/README.md b/examples/customization/custom-errors/nginx/README.md
new file mode 100644
index 000000000..2f79388d5
--- /dev/null
+++ b/examples/customization/custom-errors/nginx/README.md
@@ -0,0 +1,82 @@
+This example shows how it is possible to use a custom backend to render custom error pages. The code for this example is located in [nginx-debug-server](https://github.com/aledbf/contrib/tree/nginx-debug-server)
+
+
+The idea is to use the headers `X-Code` and `X-Format` that NGINX passes to the backend in case of an error to find the best existing representation of the response to be returned, i.e. if the request contains an `Accept` header of type `json`, the error should be in that format and not in `html` (the default in NGINX).
+
+First create the custom backend to use in the Ingress controller
+
+```
+$ kubectl create -f custom-default-backend.yaml
+service "nginx-errors" created
+replicationcontroller "nginx-errors" created
+```
+
+```
+$ kubectl get svc
+NAME           CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
+echoheaders    10.3.0.7     nodes         80/TCP    23d
+kubernetes     10.3.0.1     <none>        443/TCP   34d
+nginx-errors   10.3.0.102   <none>        80/TCP    11s
+```
+
+```
+$ kubectl get rc
+CONTROLLER     REPLICAS   AGE
+echoheaders    1          19d
+nginx-errors   1          19s
+```
+
+Next, create the Ingress controller by executing
+```
+$ kubectl create -f rc-custom-errors.yaml
+```
+
+Now, to check that this is working, we use curl:
+
+```
+$ curl -v http://172.17.4.99/
+*   Trying 172.17.4.99...
+* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
+> GET / HTTP/1.1
+> Host: 172.17.4.99
+> User-Agent: curl/7.43.0
+> Accept: */*
+>
+< HTTP/1.1 404 Not Found
+< Server: nginx/1.10.0
+< Date: Wed, 04 May 2016 02:53:45 GMT
+< Content-Type: text/html
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Vary: Accept-Encoding
+<
+The page you're looking for could not be found.
+
+* Connection #0 to host 172.17.4.99 left intact
+```
+
+Specifying json as the expected format:
+
+```
+$ curl -v http://172.17.4.99/ -H 'Accept: application/json'
+*   Trying 172.17.4.99...
+* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
+> GET / HTTP/1.1
+> Host: 172.17.4.99
+> User-Agent: curl/7.43.0
+> Accept: application/json
+>
+< HTTP/1.1 404 Not Found
+< Server: nginx/1.10.0
+< Date: Wed, 04 May 2016 02:54:00 GMT
+< Content-Type: text/html
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Vary: Accept-Encoding
+<
+{ "message": "The page you're looking for could not be found" }
+
+* Connection #0 to host 172.17.4.99 left intact
+```
+
+By default the Ingress controller provides support for `html`, `json` and `XML`.
diff --git a/examples/customization/custom-errors/nginx/custom-default-backend.yaml b/examples/customization/custom-errors/nginx/custom-default-backend.yaml
new file mode 100644
index 000000000..fce7c0bcb
--- /dev/null
+++ b/examples/customization/custom-errors/nginx/custom-default-backend.yaml
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-errors
+  labels:
+    app: nginx-errors
+spec:
+  ports:
+  - port: 80
+    targetPort: 80
+    protocol: TCP
+    name: http
+  selector:
+    app: nginx-errors
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nginx-errors
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: nginx-errors
+    spec:
+      containers:
+      - name: nginx-errors
+        image: aledbf/nginx-error-server:0.1
+        ports:
+        - containerPort: 80
\ No newline at end of file
diff --git a/examples/customization/custom-errors/nginx/rc-custom-errors.yaml b/examples/customization/custom-errors/nginx/rc-custom-errors.yaml
new file mode 100644
index 000000000..d26dcbd5e
--- /dev/null
+++ b/examples/customization/custom-errors/nginx/rc-custom-errors.yaml
@@ -0,0 +1,51 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nginx-ingress-controller
+  labels:
+    k8s-app: nginx-ingress-lb
+spec:
+  replicas: 1
+  selector:
+    k8s-app: nginx-ingress-lb
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-ingress-lb
+        name: nginx-ingress-lb
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
+        name: nginx-ingress-lb
+        imagePullPolicy: Always
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          timeoutSeconds: 1
+        # use downward API
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - containerPort: 80
+          hostPort: 80
+        - containerPort: 443
+          hostPort: 443
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=$(POD_NAMESPACE)/nginx-errors
diff --git a/examples/customization/custom-headers/nginx/README.md b/examples/customization/custom-headers/nginx/README.md
new file mode 100644
index 000000000..459b43b1e
--- /dev/null
+++ b/examples/customization/custom-headers/nginx/README.md
@@ -0,0 +1,76 @@
+# Deploying the Nginx Ingress controller
+
+This example aims to demonstrate the deployment of an nginx ingress controller and
+the use of a ConfigMap to configure a custom list of headers to be passed to the upstream
+server.
+
+## Default Backend
+
+The default backend is a Service capable of handling all url paths and hosts that the
+nginx controller doesn't understand. This most basic implementation just returns
+a 404 page:
+
+```console
+$ kubectl apply -f default-backend.yaml
+deployment "default-http-backend" created
+service "default-http-backend" created
+
+$ kubectl -n kube-system get po
+NAME                                    READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-qgwdd   1/1       Running   0          28s
+```
+
+## Custom configuration
+
+```console
+$ cat nginx-load-balancer-conf.yaml
+apiVersion: v1
+data:
+  proxy-set-headers: "default/custom-headers"
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-conf
+```
+
+```console
+$ kubectl create -f nginx-load-balancer-conf.yaml
+```
+
+## Custom headers
+
+```console
+$ cat custom-headers.yaml
+apiVersion: v1
+data:
+  X-Different-Name: "true"
+  X-Request-Start: t=${msec}
+  X-Using-Nginx-Controller: "true"
+kind: ConfigMap
+metadata:
+  name: proxy-headers
+  namespace: default
+
+```
+
+```console
+$ kubectl create -f custom-headers.yaml
+```
+
+## Controller
+
+You can deploy the controller as follows:
+
+```console
+$ kubectl apply -f nginx-ingress-controller.yaml
+deployment "nginx-ingress-controller" created
+
+$ kubectl -n kube-system get po
+NAME                                       READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-qgwdd      1/1       Running   0          2m
+nginx-ingress-controller-873061567-4n3k2   1/1       Running   0          42s
+```
+
+## Test
+
+Check that the contents of the configmap are present in the nginx.conf file using:
+`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf`
diff --git a/examples/customization/custom-headers/nginx/custom-headers.yaml b/examples/customization/custom-headers/nginx/custom-headers.yaml
new file mode 100644
index 000000000..beeefc8a4
--- /dev/null
+++ b/examples/customization/custom-headers/nginx/custom-headers.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+data:
+  X-Different-Name: "true"
+  X-Request-Start: t=${msec}
+  X-Using-Nginx-Controller: "true"
+kind: ConfigMap
+metadata:
+  name: proxy-headers
+  namespace: kube-system
diff --git a/examples/customization/custom-headers/nginx/default-backend.yaml b/examples/customization/custom-headers/nginx/default-backend.yaml
new file mode 100644
index 000000000..3c40989a3
--- /dev/null
+++ b/examples/customization/custom-headers/nginx/default-backend.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: default-http-backend
+  labels:
+    k8s-app: default-http-backend
+  namespace: kube-system
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: default-http-backend
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - name: default-http-backend
+        # Any image is permissible as long as:
+        # 1. It serves a 404 page at /
+        # 2. It serves 200 on a /healthz endpoint
+        image: gcr.io/google_containers/defaultbackend:1.0
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 8080
+        resources:
+          limits:
+            cpu: 10m
+            memory: 20Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: default-http-backend
+  namespace: kube-system
+  labels:
+    k8s-app: default-http-backend
+spec:
+  ports:
+  - port: 80
+    targetPort: 8080
+  selector:
+    k8s-app: default-http-backend
diff --git a/examples/customization/custom-headers/nginx/nginx-ingress-controller.yaml b/examples/customization/custom-headers/nginx/nginx-ingress-controller.yaml
new file mode 100644
index 000000000..c4065804a
--- /dev/null
+++ b/examples/customization/custom-headers/nginx/nginx-ingress-controller.yaml
@@ -0,0 +1,53 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nginx-ingress-controller
+  labels:
+    k8s-app: nginx-ingress-controller
+  namespace: kube-system
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-ingress-controller
+    spec:
+      # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
+      # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
+      # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
+      # like with kubeadm
+      # hostNetwork: true
+      terminationGracePeriodSeconds: 60
+      containers:
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
+        name: nginx-ingress-controller
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          timeoutSeconds: 1
+        ports:
+        - containerPort: 80
+          hostPort: 80
+        - containerPort: 443
+          hostPort: 443
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
+        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf
diff --git a/examples/customization/custom-headers/nginx/nginx-load-balancer-conf.yaml b/examples/customization/custom-headers/nginx/nginx-load-balancer-conf.yaml
new file mode 100644
index 000000000..239918267
--- /dev/null
+++ b/examples/customization/custom-headers/nginx/nginx-load-balancer-conf.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+data:
+  proxy-set-headers: "kube-system/custom-headers"
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-conf
+  namespace: kube-system
diff --git a/examples/customization/custom-template/README.md b/examples/customization/custom-template/README.md
new file mode 100644
index 000000000..d2b223b51
--- /dev/null
+++ b/examples/customization/custom-template/README.md
@@ -0,0 +1,8 @@
+This example shows how it is possible to use a custom template.
+
+First, create a configmap containing the template by running:
+```
+kubectl create configmap nginx-template --from-file=nginx.tmpl=../../nginx.tmpl
+```
+
+Next, create the rc: `kubectl create -f custom-template.yaml`
diff --git a/examples/customization/custom-template/custom-template.yaml b/examples/customization/custom-template/custom-template.yaml
new file mode 100644
index 000000000..168b56b50
--- /dev/null
+++ b/examples/customization/custom-template/custom-template.yaml
@@ -0,0 +1,62 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nginx-ingress-controller
+  labels:
+    k8s-app: nginx-ingress-lb
+spec:
+  replicas: 1
+  selector:
+    k8s-app: nginx-ingress-lb
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-ingress-lb
+        name: nginx-ingress-lb
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
+        name: nginx-ingress-lb
+        imagePullPolicy: Always
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          timeoutSeconds: 1
+        # use downward API
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - containerPort: 80
+          hostPort: 80
+        - containerPort: 443
+          hostPort: 443
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
+        volumeMounts:
+        - mountPath: /etc/nginx/template
+          name: nginx-template-volume
+          readOnly: true
+      volumes:
+      - name: nginx-template-volume
+        configMap:
+          name: nginx-template
+          items:
+          - key: nginx.tmpl
+            path: nginx.tmpl
diff --git a/examples/customization/custom-upstream-check/README.md b/examples/customization/custom-upstream-check/README.md
new file mode 100644
index 000000000..de81c40ff
--- /dev/null
+++ b/examples/customization/custom-upstream-check/README.md
@@ -0,0 +1,45 @@
+This example shows how it is possible to create a custom configuration for a particular upstream associated with an Ingress rule.
+
+```
+echo "
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: echoheaders
+  annotations:
+    ingress.kubernetes.io/upstream-fail-timeout: "30"
+spec:
+  rules:
+  - host: foo.bar.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: echoheaders
+          servicePort: 80
+" | kubectl create -f -
+```
+
+Check that the annotation is present in the Ingress rule:
+```
+kubectl get ingress echoheaders -o yaml
+```
+
+Check that the NGINX configuration has been updated, using kubectl or the status page:
+
+```
+$ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf
+```
+
+```
+....
+    upstream default-echoheaders-x-80 {
+        least_conn;
+        server 10.2.92.2:8080 max_fails=5 fail_timeout=30;
+
+    }
+....
+```
+
+
+![nginx-module-vts](custom-upstream.png "screenshot with custom configuration")
diff --git a/examples/customization/custom-upstream-check/custom-upstream.png b/examples/customization/custom-upstream-check/custom-upstream.png
new file mode 100644
index 000000000..30417894b
Binary files /dev/null and b/examples/customization/custom-upstream-check/custom-upstream.png differ
diff --git a/examples/customization/ssl-dh-param/nginx/README.md b/examples/customization/ssl-dh-param/nginx/README.md
new file mode 100644
index 000000000..54f3287fa
--- /dev/null
+++ b/examples/customization/ssl-dh-param/nginx/README.md
@@ -0,0 +1,79 @@
+# Deploying the Nginx Ingress controller
+
+This example aims to demonstrate the deployment of an nginx ingress controller and
+the use of a ConfigMap to configure a custom Diffie-Hellman parameters file to help with
+"Perfect Forward Secrecy".
+
+## Default Backend
+
+The default backend is a Service capable of handling all url paths and hosts that the
+nginx controller doesn't understand.
This most basic implementation just returns
+a 404 page:
+
+```console
+$ kubectl apply -f default-backend.yaml
+deployment "default-http-backend" created
+service "default-http-backend" created
+
+$ kubectl -n kube-system get po
+NAME                                    READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-qgwdd   1/1       Running   0          28s
+```
+
+## Custom configuration
+
+```console
+$ cat nginx-load-balancer-conf.yaml
+apiVersion: v1
+data:
+  ssl-dh-param: "kube-system/lb-dhparam"
+kind: ConfigMap
+metadata:
+  name: nginx-load-balancer-conf
+```
+
+```console
+$ kubectl create -f nginx-load-balancer-conf.yaml
+```
+
+## Custom DH parameters secret
+
+```console
+$> openssl dhparam 1024 2> /dev/null | base64
+LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ...
+```
+
+```console
+$ cat ssl-dh-param.yaml
+apiVersion: v1
+data:
+  dhparam.pem: "LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ..."
+kind: Secret
+type: Opaque
+metadata:
+  name: lb-dhparam
+  namespace: kube-system
+```
+
+```console
+$ kubectl create -f ssl-dh-param.yaml
+```
+
+## Controller
+
+You can deploy the controller as follows:
+
+```console
+$ kubectl apply -f nginx-ingress-controller.yaml
+deployment "nginx-ingress-controller" created
+
+$ kubectl -n kube-system get po
+NAME                                       READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-qgwdd      1/1       Running   0          2m
+nginx-ingress-controller-873061567-4n3k2   1/1       Running   0          42s
+```
+
+## Test
+
+Check that the contents of the configmap are present in the nginx.conf file using:
+`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf`
diff --git a/examples/customization/ssl-dh-param/nginx/default-backend.yaml b/examples/customization/ssl-dh-param/nginx/default-backend.yaml
new file mode 100644
index 000000000..3c40989a3
--- /dev/null
+++ b/examples/customization/ssl-dh-param/nginx/default-backend.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: default-http-backend
+  labels:
+    k8s-app: default-http-backend
+  namespace: kube-system
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: default-http-backend
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - name: default-http-backend
+        # Any image is permissible as long as:
+        # 1. It serves a 404 page at /
+        # 2.
It serves 200 on a /healthz endpoint + image: gcr.io/google_containers/defaultbackend:1.0 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: default-http-backend + namespace: kube-system + labels: + k8s-app: default-http-backend +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + k8s-app: default-http-backend diff --git a/examples/customization/ssl-dh-param/nginx/nginx-ingress-controller.yaml b/examples/customization/ssl-dh-param/nginx/nginx-ingress-controller.yaml new file mode 100644 index 000000000..5786f03d9 --- /dev/null +++ b/examples/customization/ssl-dh-param/nginx/nginx-ingress-controller.yaml @@ -0,0 +1,53 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nginx-ingress-controller + labels: + k8s-app: nginx-ingress-controller + namespace: kube-system +spec: + replicas: 1 + template: + metadata: + labels: + k8s-app: nginx-ingress-controller + spec: + # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration + # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host + # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used + # like with kubeadm + # hostNetwork: true + terminationGracePeriodSeconds: 60 + containers: + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.3 + name: nginx-ingress-controller + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + ports: + - containerPort: 80 + hostPort: 80 + - containerPort: 443 + hostPort: 443 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - /nginx-ingress-controller + - --default-backend-service=$(POD_NAMESPACE)/default-http-backend + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf diff --git a/examples/customization/ssl-dh-param/nginx/nginx-load-balancer-conf.yaml b/examples/customization/ssl-dh-param/nginx/nginx-load-balancer-conf.yaml new file mode 100644 index 000000000..6e8858c67 --- /dev/null +++ b/examples/customization/ssl-dh-param/nginx/nginx-load-balancer-conf.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + ssl-dh-param: "kube-system/lb-dhparam" +kind: ConfigMap +metadata: + name: nginx-load-balancer-conf + namespace: kube-system diff --git a/examples/customization/ssl-dh-param/nginx/ssl-dh-param.yaml b/examples/customization/ssl-dh-param/nginx/ssl-dh-param.yaml new file mode 100644 index 000000000..14fdfb30e --- /dev/null +++ b/examples/customization/ssl-dh-param/nginx/ssl-dh-param.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + dhparam.pem: "...base64 encoded data..." 
+kind: Secret
+type: Opaque
+metadata:
+  name: lb-dhparam
+  namespace: kube-system
diff --git a/examples/daemonset/haproxy/README.md b/examples/daemonset/haproxy/README.md
new file mode 100644
index 000000000..75fe32eb5
--- /dev/null
+++ b/examples/daemonset/haproxy/README.md
@@ -0,0 +1,62 @@
+# HAProxy Ingress DaemonSet
+
+In some cases, the Ingress controller must run on every node in the cluster. A [DaemonSet](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/daemon.md) achieves this.
+
+## Prerequisites
+
+This ingress controller doesn't yet have support for
+[ingress classes](/examples/PREREQUISITES.md#ingress-class). You MUST turn
+down any existing ingress controllers before running HAProxy Ingress controller or
+they will fight for Ingresses. This includes any cloudprovider controller.
+
+This document also has the following prerequisites:
+
+* Create a [TLS secret](/examples/PREREQUISITES.md#tls-certificates) named `tls-secret` to be used as the default TLS certificate
+
+Creating the TLS secret:
+
+```console
+$ openssl req \
+  -x509 -newkey rsa:2048 -nodes -days 365 \
+  -keyout tls.key -out tls.crt -subj '/CN=localhost'
+$ kubectl create secret tls tls-secret --cert=tls.crt --key=tls.key
+$ rm -v tls.crt tls.key
+```
+
+## Default Backend
+
+The default backend is a Service that handles all URL paths and hosts the HAProxy controller doesn't understand. Deploy the default-http-backend as follows:
+
+```console
+$ kubectl apply -f ../../deployment/nginx/default-backend.yaml
+deployment "default-http-backend" configured
+service "default-http-backend" configured
+
+$ kubectl -n kube-system get svc
+NAME                   CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
+default-http-backend   192.168.3.4   <none>        80/TCP    30m
+
+$ kubectl -n kube-system get pods
+NAME                         READY     STATUS    RESTARTS   AGE
+default-http-backend-q5sb6   1/1       Running   0          30m
+```
+
+## Ingress DaemonSet
+
+Deploy the daemonset as follows:
+
+```console
+$ kubectl apply -f haproxy-ingress-daemonset.yaml
+```
+
+Check if the controller was successfully deployed:
+```console
+$ kubectl -n kube-system get ds
+NAME              DESIRED   CURRENT   READY     NODE-SELECTOR   AGE
+haproxy-ingress   2         2         2         <none>          45s
+
+$ kubectl -n kube-system get pods
+NAME                         READY     STATUS    RESTARTS   AGE
+default-http-backend-q5sb6   1/1       Running   0          45m
+haproxy-ingress-km32x        1/1       Running   0          1m
+```
diff --git a/examples/daemonset/haproxy/haproxy-ingress-daemonset.yaml b/examples/daemonset/haproxy/haproxy-ingress-daemonset.yaml
new file mode 100644
index 000000000..6d6d689ad
--- /dev/null
+++ b/examples/daemonset/haproxy/haproxy-ingress-daemonset.yaml
@@ -0,0 +1,35 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  labels:
+    run: haproxy-ingress
+  name: haproxy-ingress
+spec:
+  template:
+    metadata:
+      labels:
+        run: haproxy-ingress
+    spec:
+      containers:
+      - name: haproxy-ingress
+        image: quay.io/jcmoraisjr/haproxy-ingress
+        imagePullPolicy: IfNotPresent
+        args:
+        - --default-backend-service=default/default-http-backend
+        - --default-ssl-certificate=default/tls-secret
+        ports:
+        - name: http
+          containerPort: 80
+        - name: https
+          containerPort: 443
+        - name: stat
+          containerPort: 1936
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
diff --git a/examples/daemonset/nginx/README.md b/examples/daemonset/nginx/README.md
new file mode 100644
index 000000000..04ee2a443
--- /dev/null
+++ b/examples/daemonset/nginx/README.md
@@ -0,0 +1,40 @@
+# Nginx Ingress DaemonSet
+
+In some cases, the Ingress controller must run on every node in the cluster. A [DaemonSet](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/daemon.md) achieves this.
+
+## Default Backend
+
+The default backend is a Service that handles all URL paths and hosts the nginx controller doesn't understand. Deploy the default-http-backend as follows:
+
+```console
+$ kubectl apply -f ../../deployment/nginx/default-backend.yaml
+deployment "default-http-backend" configured
+service "default-http-backend" configured
+
+$ kubectl -n kube-system get svc
+NAME                   CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
+default-http-backend   192.168.3.6   <none>        80/TCP    1h
+
+$ kubectl -n kube-system get po
+NAME                                    READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-6b47n   1/1       Running   0          1h
+```
+
+## Ingress DaemonSet
+
+Deploy the daemonset as follows:
+
+```console
+$ kubectl apply -f nginx-ingress-daemonset.yaml
+daemonset "nginx-ingress-lb" created
+
+$ kubectl -n kube-system get ds
+NAME               DESIRED   CURRENT   READY     NODE-SELECTOR   AGE
+nginx-ingress-lb   2         2         2         <none>          21s
+
+$ kubectl -n kube-system get po
+NAME                                    READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-6b47n   1/1       Running   0          2h
+nginx-ingress-lb-8381i                  1/1       Running   0          56s
+nginx-ingress-lb-h54gf                  1/1       Running   0          56s
+```
diff --git a/examples/daemonset/nginx/nginx-ingress-daemonset.yaml b/examples/daemonset/nginx/nginx-ingress-daemonset.yaml
new file mode 100644
index 000000000..1b476d670
--- /dev/null
+++ b/examples/daemonset/nginx/nginx-ingress-daemonset.yaml
@@ -0,0 +1,47 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: nginx-ingress-lb
+  labels:
+    name: nginx-ingress-lb
+  namespace: kube-system
+spec:
+  template:
+    metadata:
+      labels:
+        name: nginx-ingress-lb
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
+        name: nginx-ingress-lb
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          timeoutSeconds: 1
+        ports:
+        - containerPort: 80
+          hostPort: 80
+        - containerPort: 443
+          hostPort: 443
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
+
diff --git a/examples/deployment/gce/README.md b/examples/deployment/gce/README.md
new file mode 100644
index 000000000..1c8ef12db
--- /dev/null
+++ b/examples/deployment/gce/README.md
@@ -0,0 +1,78 @@
+# Deploying the GCE Ingress controller
+
+This example demonstrates the deployment of a GCE Ingress controller.
+
+Note: __all GCE/GKE clusters already have an Ingress controller running
+on the master. The only reason to deploy another GCE controller is if you want
+to debug or otherwise observe its operation (e.g. via `kubectl logs`). Before
+deploying another one in your cluster, make sure you disable the master
+controller.__
+
+## Disabling the master controller
+
+As of Kubernetes 1.3, GLBC runs as a static pod on the master. If you want to
+totally disable it, you can SSH into the master node and delete the GLBC
+manifest file found at `/etc/kubernetes/manifests/glbc.manifest`. 
You can also +disable it on GKE at cluster bring-up time through the `disable-addons` flag: + +```console +gcloud container clusters create mycluster --network "default" --num-nodes 1 \ +--machine-type n1-standard-2 --zone $ZONE \ +--disable-addons HttpLoadBalancing \ +--disk-size 50 --scopes storage-full +``` + +## Deploying a new controller + +The following command deploys a GCE Ingress controller in your cluster + +```console +$ kubectl create -f gce-ingress-controller.yaml +service "default-http-backend" created +replicationcontroller "l7-lb-controller" created + +$ kubectl get po -l name=glbc +NAME READY STATUS RESTARTS AGE +l7-lb-controller-1s22c 2/2 Running 0 27s +``` + +now you can create an Ingress and observe the controller + +```console +$ kubectl create -f gce-tls-ingress.yaml +ingress "test" created + +$ kubectl logs l7-lb-controller-1s22c -c l7-lb-controller +I0201 01:03:17.387548 1 main.go:179] Starting GLBC image: glbc:0.9.2, cluster name +I0201 01:03:18.459740 1 main.go:291] Using saved cluster uid "32658fa96c080068" +I0201 01:03:18.459771 1 utils.go:122] Changing cluster name from to 32658fa96c080068 +I0201 01:03:18.461652 1 gce.go:331] Using existing Token Source &oauth2.reuseTokenSource{new:google.computeSource{account:""}, mu:sync.Mutex{state:0, sema:0x0}, t:(*oauth2.Token)(nil)} +I0201 01:03:18.553142 1 cluster_manager.go:264] Created GCE client without a config file +I0201 01:03:18.553773 1 controller.go:234] Starting loadbalancer controller +I0201 01:04:58.314271 1 event.go:217] Event(api.ObjectReference{Kind:"Ingress", Namespace:"default", Name:"test", UID:"73549716-e81a-11e6-a8c5-42010af00002", APIVersion:"extensions", ResourceVersion:"673016", FieldPath:""}): type: 'Normal' reason: 'ADD' default/test +I0201 01:04:58.413616 1 instances.go:76] Creating instance group k8s-ig--32658fa96c080068 in zone us-central1-b +I0201 01:05:01.998169 1 gce.go:2084] Adding port 30301 to instance group k8s-ig--32658fa96c080068 with 0 ports +I0201 01:05:02.444014 1 backends.go:149] Creating backend for 1 instance groups, port 30301 named port &{port30301 30301 []} +I0201 01:05:02.444175 1 utils.go:495] No pod in service http-svc with node port 30301 has declared a matching readiness probe for health checks. +I0201 01:05:02.555599 1 healthchecks.go:62] Creating health check k8s-be-30301--32658fa96c080068 +I0201 01:05:11.300165 1 gce.go:2084] Adding port 31938 to instance group k8s-ig--32658fa96c080068 with 1 ports +I0201 01:05:11.743914 1 backends.go:149] Creating backend for 1 instance groups, port 31938 named port &{port31938 31938 []} +I0201 01:05:11.744008 1 utils.go:495] No pod in service default-http-backend with node port 31938 has declared a matching readiness probe for health checks. +I0201 01:05:11.811972 1 healthchecks.go:62] Creating health check k8s-be-31938--32658fa96c080068 +I0201 01:05:19.871791 1 loadbalancers.go:121] Creating l7 default-test--32658fa96c080068 +... + +$ kubectl get ing test +NAME HOSTS ADDRESS PORTS AGE +test * 35.186.208.106 80, 443 4m + +$ curl 35.186.208.106 -kL +CLIENT VALUES: +client_address=10.180.3.1 +command=GET +real path=/ +query=nil +request_version=1.1 +request_uri=http://35.186.208.106:8080/ +... 
+```
diff --git a/examples/deployment/gce/gce-ingress-controller.yaml b/examples/deployment/gce/gce-ingress-controller.yaml
new file mode 100644
index 000000000..8bbee4bba
--- /dev/null
+++ b/examples/deployment/gce/gce-ingress-controller.yaml
@@ -0,0 +1,82 @@
+apiVersion: v1
+kind: Service
+metadata:
+  # This must match the --default-backend-service argument of the l7 lb
+  # controller and is required because GCE mandates a default backend.
+  name: default-http-backend
+  labels:
+    k8s-app: glbc
+spec:
+  # The default backend must be of type NodePort.
+  type: NodePort
+  ports:
+  - port: 80
+    targetPort: 8080
+    protocol: TCP
+    name: http
+  selector:
+    k8s-app: glbc
+
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: l7-lb-controller
+  labels:
+    k8s-app: glbc
+    version: v0.9.0
+spec:
+  # There should never be more than 1 controller alive simultaneously.
+  replicas: 1
+  selector:
+    k8s-app: glbc
+    version: v0.9.0
+  template:
+    metadata:
+      labels:
+        k8s-app: glbc
+        version: v0.9.0
+        name: glbc
+    spec:
+      terminationGracePeriodSeconds: 600
+      containers:
+      - name: default-http-backend
+        # Any image is permissible as long as:
+        # 1. It serves a 404 page at /
+        # 2. It serves 200 on a /healthz endpoint
+        image: gcr.io/google_containers/defaultbackend:1.0
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        ports:
+        - containerPort: 8080
+        resources:
+          limits:
+            cpu: 10m
+            memory: 20Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+      - image: gcr.io/google_containers/glbc:0.9.2
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8081
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        name: l7-lb-controller
+        resources:
+          limits:
+            cpu: 100m
+            memory: 100Mi
+          requests:
+            cpu: 100m
+            memory: 50Mi
+        args:
+        - --default-backend-service=default/default-http-backend
+        - --sync-period=300s
diff --git a/examples/tls-termination/gce-tls-ingress.yaml b/examples/deployment/gce/gce-tls-ingress.yaml
similarity index 100%
rename from examples/tls-termination/gce-tls-ingress.yaml
rename to examples/deployment/gce/gce-tls-ingress.yaml
diff --git a/examples/deployment/haproxy/README.md b/examples/deployment/haproxy/README.md
new file mode 100644
index 000000000..b03541e66
--- /dev/null
+++ b/examples/deployment/haproxy/README.md
@@ -0,0 +1,151 @@
+# Deploying HAProxy Ingress Controller
+
+If you don't have a Kubernetes cluster, please refer to [setup](/docs/dev/setup.md)
+for instructions on how to create a new one.
+
+## Prerequisites
+
+This ingress controller doesn't yet have support for
+[ingress classes](/examples/PREREQUISITES.md#ingress-class). You MUST turn
+down any existing ingress controllers before running HAProxy Ingress controller or
+they will fight for Ingresses. This includes any cloudprovider controller.
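+
+For example, if an existing controller runs as a Deployment (the name below is
+illustrative; use whatever you actually deployed), one way to turn it down is:
+
+```console
+$ kubectl -n kube-system scale deployment nginx-ingress-controller --replicas=0
+```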
+
+This document also has the following prerequisites:
+
+* Deploy a [web app](/examples/PREREQUISITES.md#test-http-service) for testing
+* Create a [TLS secret](/examples/PREREQUISITES.md#tls-certificates) named `tls-secret` to be used as the default TLS certificate
+
+The web app can be created as follows:
+
+```console
+$ kubectl run http-svc \
+  --image=gcr.io/google_containers/echoserver:1.3 \
+  --port=8080 \
+  --replicas=2 \
+  --expose
+```
+
+Creating the TLS secret:
+
+```console
+$ openssl req \
+  -x509 -newkey rsa:2048 -nodes -days 365 \
+  -keyout tls.key -out tls.crt -subj '/CN=localhost'
+$ kubectl create secret tls tls-secret --cert=tls.crt --key=tls.key
+$ rm -v tls.crt tls.key
+```
+
+## Default backend
+
+Deploy a default backend used to serve `404 Not Found` pages:
+
+```console
+$ kubectl run ingress-default-backend \
+  --image=gcr.io/google_containers/defaultbackend:1.0 \
+  --port=8080 \
+  --limits=cpu=10m,memory=20Mi \
+  --expose
+```
+
+Check if the default backend is up and running:
+
+```console
+$ kubectl get pod
+NAME                                       READY     STATUS    RESTARTS   AGE
+ingress-default-backend-1110790216-gqr61   1/1       Running   0          10s
+```
+
+## Controller
+
+Deploy HAProxy Ingress:
+
+```console
+$ kubectl create -f haproxy-ingress.yaml
+```
+
+Check if the controller was successfully deployed:
+
+```console
+$ kubectl get pod -w
+NAME                                       READY     STATUS    RESTARTS   AGE
+haproxy-ingress-2556761959-tv20k           1/1       Running   0          12s
+ingress-default-backend-1110790216-gqr61   1/1       Running   0          3m
+^C
+```
+
+Deploy the Ingress resource of our already deployed web app:
+
+```console
+$ kubectl create -f - <`
+- OAUTH2_PROXY_CLIENT_SECRET with the github `<client-secret>`
+- OAUTH2_PROXY_COOKIE_SECRET with value of `python -c 'import os,base64; print base64.b64encode(os.urandom(16))'`
+
+4. Customize the contents of the file dashboard-ingress.yaml:
+
+Replace `__INGRESS_HOST__` with a valid FQDN and `__INGRESS_SECRET__` with the name of a Secret containing a valid SSL certificate.
+
+5. 
Deploy the oauth2 proxy and the ingress rules running: +```console +$ kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml +``` + +Test the oauth integration accessing the configured URL, like `https://foo.bar.com` + + +![Register OAuth2 Application](images/github-auth.png) + +![Github authentication](images/oauth-login.png) + +![Kubernetes dashboard](images/dashboard.png) diff --git a/examples/external-auth/nginx/dashboard-ingress.yaml b/examples/external-auth/nginx/dashboard-ingress.yaml new file mode 100644 index 000000000..642e38f5b --- /dev/null +++ b/examples/external-auth/nginx/dashboard-ingress.yaml @@ -0,0 +1,38 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + ingress.kubernetes.io/auth-signin: https://$host/oauth2/sign_in + ingress.kubernetes.io/auth-url: https://$host/oauth2/auth + name: external-auth-oauth2 + namespace: kube-system +spec: + rules: + - host: __INGRESS_HOST__ + http: + paths: + - backend: + serviceName: kubernetes-dashboard + servicePort: 80 + path: / + +--- + +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: oauth2-proxy + namespace: kube-system +spec: + rules: + - host: __INGRESS_HOST__ + http: + paths: + - backend: + serviceName: oauth2-proxy + servicePort: 4180 + path: /oauth2 + tls: + - hosts: + - __INGRESS_HOST__ + secretName: __INGRESS_SECRET__ diff --git a/examples/external-auth/nginx/images/dashboard.png b/examples/external-auth/nginx/images/dashboard.png new file mode 100644 index 000000000..3acb7bb11 Binary files /dev/null and b/examples/external-auth/nginx/images/dashboard.png differ diff --git a/examples/external-auth/nginx/images/github-auth.png b/examples/external-auth/nginx/images/github-auth.png new file mode 100644 index 000000000..a7ee97d7e Binary files /dev/null and b/examples/external-auth/nginx/images/github-auth.png differ diff --git a/examples/external-auth/nginx/images/oauth-login.png b/examples/external-auth/nginx/images/oauth-login.png new file mode 100644 index 000000000..c8f7f8b17 Binary files /dev/null and b/examples/external-auth/nginx/images/oauth-login.png differ diff --git a/examples/external-auth/nginx/images/register-oauth-app-2.png b/examples/external-auth/nginx/images/register-oauth-app-2.png new file mode 100644 index 000000000..ef69149bb Binary files /dev/null and b/examples/external-auth/nginx/images/register-oauth-app-2.png differ diff --git a/examples/external-auth/nginx/images/register-oauth-app.png b/examples/external-auth/nginx/images/register-oauth-app.png new file mode 100644 index 000000000..9d6baa87e Binary files /dev/null and b/examples/external-auth/nginx/images/register-oauth-app.png differ diff --git a/examples/external-auth/nginx/oauth2-proxy.yaml b/examples/external-auth/nginx/oauth2-proxy.yaml new file mode 100644 index 000000000..1735f4690 --- /dev/null +++ b/examples/external-auth/nginx/oauth2-proxy.yaml @@ -0,0 +1,56 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + k8s-app: oauth2-proxy + name: oauth2-proxy + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: oauth2-proxy + template: + metadata: + labels: + k8s-app: oauth2-proxy + spec: + containers: + - args: + - --provider=github + - --email-domain=* + - --upstream=file:///dev/null + - --http-address=0.0.0.0:4180 + # Register a new application + # https://github.com/settings/applications/new + env: + - name: OAUTH2_PROXY_CLIENT_ID + value: + - name: OAUTH2_PROXY_CLIENT_SECRET + value: + # python -c 'import os,base64; print 
base64.b64encode(os.urandom(16))' + - name: OAUTH2_PROXY_COOKIE_SECRET + value: SECRET + image: docker.io/colemickens/oauth2_proxy:latest + imagePullPolicy: Always + name: oauth2-proxy + ports: + - containerPort: 4180 + protocol: TCP + +--- + +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: oauth2-proxy + name: oauth2-proxy +spec: + ports: + - name: http + port: 4180 + protocol: TCP + targetPort: 4180 + selector: + k8s-app: oauth2-proxy diff --git a/examples/health-checks/gce.md b/examples/health-checks/gce.md deleted file mode 100644 index 9d3831a6f..000000000 --- a/examples/health-checks/gce.md +++ /dev/null @@ -1,3 +0,0 @@ -# Health checks for the GCE controller - -Placeholder diff --git a/examples/health-checks/gce/README.md b/examples/health-checks/gce/README.md new file mode 100644 index 000000000..25d2049dc --- /dev/null +++ b/examples/health-checks/gce/README.md @@ -0,0 +1,74 @@ +# Simple HTTP health check example + +The GCE Ingress controller adopts the readiness probe from the matching endpoints, provided the readiness probe doesn't require HTTPS or special headers. + +Create the following app: +```console +$ kubectl create -f health_check_app.yaml +replicationcontroller "echoheaders" created +You have exposed your service on an external port on all nodes in your +cluster. If you want to expose this service to the external internet, you may +need to set up firewall rules for the service port(s) (tcp:31165) to serve traffic. + +See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. +service "echoheadersx" created +You have exposed your service on an external port on all nodes in your +cluster. If you want to expose this service to the external internet, you may +need to set up firewall rules for the service port(s) (tcp:31020) to serve traffic. + +See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. +service "echoheadersy" created +ingress "echomap" created +``` + +You should soon find an Ingress that is backed by a GCE Loadbalancer. + +```console +$ kubectl describe ing echomap +Name: echomap +Namespace: default +Address: 107.178.255.228 +Default backend: default-http-backend:80 (10.180.0.9:8080,10.240.0.2:8080) +Rules: + Host Path Backends + ---- ---- -------- + foo.bar.com + /foo echoheadersx:80 () + bar.baz.com + /bar echoheadersy:80 () + /foo echoheadersx:80 () +Annotations: + target-proxy: k8s-tp-default-echomap--a9d60e8176d933ee + url-map: k8s-um-default-echomap--a9d60e8176d933ee + backends: {"k8s-be-31020--a9d60e8176d933ee":"HEALTHY","k8s-be-31165--a9d60e8176d933ee":"HEALTHY","k8s-be-31686--a9d60e8176d933ee":"HEALTHY"} + forwarding-rule: k8s-fw-default-echomap--a9d60e8176d933ee +Events: + FirstSeen LastSeen Count From SubobjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 17m 17m 1 {loadbalancer-controller } Normal ADD default/echomap + 15m 15m 1 {loadbalancer-controller } Normal CREATE ip: 107.178.255.228 + +$ curl 107.178.255.228/foo -H 'Host:foo.bar.com' +CLIENT VALUES: +client_address=10.240.0.5 +command=GET +real path=/foo +query=nil +request_version=1.1 +request_uri=http://foo.bar.com:8080/foo +... +``` + +You can confirm the health check endpoint point it's using one of 2 ways: +* Through the cloud console: compute > health checks > lookup your health check. It takes the form k8s-be-nodePort-hash, where nodePort in the example above is 31165 and 31020, as shown by the kubectl output. 
+* Through gcloud: Run `gcloud compute http-health-checks list` + +## Limitations + +A few points to note: +* The readiness probe must be exposed on the port matching the `servicePort` specified in the Ingress +* The readiness probe cannot have special requirements, like headers or HTTPS +* The probe timeouts are translated to GCE health check timeouts +* You must create the pods backing the endpoints with the given readiness probe. This *will not* work if you update the replication controller with a different readiness probe. + + diff --git a/examples/health-checks/gce/health_check_app.yaml b/examples/health-checks/gce/health_check_app.yaml new file mode 100644 index 000000000..b6f79c641 --- /dev/null +++ b/examples/health-checks/gce/health_check_app.yaml @@ -0,0 +1,82 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: echoheaders +spec: + replicas: 1 + template: + metadata: + labels: + app: echoheaders + spec: + containers: + - name: echoheaders + image: gcr.io/google_containers/echoserver:1.4 + ports: + - containerPort: 8080 + readinessProbe: + httpGet: + path: /healthz + port: 8080 + periodSeconds: 1 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 10 +--- +apiVersion: v1 +kind: Service +metadata: + name: echoheadersx + labels: + app: echoheaders +spec: + type: NodePort + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: echoheaders +--- +apiVersion: v1 +kind: Service +metadata: + name: echoheadersy + labels: + app: echoheaders +spec: + type: NodePort + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: echoheaders +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: echomap +spec: + rules: + - host: foo.bar.com + http: + paths: + - path: /foo + backend: + serviceName: echoheadersx + servicePort: 80 + - host: bar.baz.com + http: + paths: + - path: /bar + backend: + serviceName: echoheadersy + servicePort: 80 + - path: /foo + backend: + serviceName: echoheadersx + servicePort: 80 + diff --git a/examples/multi-tls/README.md b/examples/multi-tls/README.md new file mode 100644 index 000000000..ef4cd65bb --- /dev/null +++ b/examples/multi-tls/README.md @@ -0,0 +1,94 @@ +# Multi TLS certificate termination + +This examples uses 2 different certificates to terminate SSL for 2 hostnames. + +1. Deploy the controller by creating the rc in the parent dir +2. Create tls secrets for foo.bar.com and bar.baz.com as indicated in the yaml +3. 
Create multi-tls.yaml + +This should generate a segment like: +```console +$ kubectl exec -it nginx-ingress-controller-6vwd1 -- cat /etc/nginx/nginx.conf | grep "foo.bar.com" -B 7 -A 35 + server { + listen 80; + listen 443 ssl http2; + ssl_certificate /etc/nginx-ssl/default-foobar.pem; + ssl_certificate_key /etc/nginx-ssl/default-foobar.pem; + + + server_name foo.bar.com; + + + if ($scheme = http) { + return 301 https://$host$request_uri; + } + + + + location / { + proxy_set_header Host $host; + + # Pass Real IP + proxy_set_header X-Real-IP $remote_addr; + + # Allow websocket connections + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Proto $pass_access_scheme; + + proxy_connect_timeout 5s; + proxy_send_timeout 60s; + proxy_read_timeout 60s; + + proxy_redirect off; + proxy_buffering off; + + proxy_http_version 1.1; + + proxy_pass http://default-echoheaders-80; + } +``` + +And you should be able to reach your nginx service or echoheaders service using a hostname switch: +```console +$ kubectl get ing +NAME RULE BACKEND ADDRESS AGE +foo-tls - 104.154.30.67 13m + foo.bar.com + / echoheaders:80 + bar.baz.com + / nginx:80 + +$ curl https://104.154.30.67 -H 'Host:foo.bar.com' -k +CLIENT VALUES: +client_address=10.245.0.6 +command=GET +real path=/ +query=nil +request_version=1.1 +request_uri=http://foo.bar.com:8080/ + +SERVER VALUES: +server_version=nginx: 1.9.11 - lua: 10001 + +HEADERS RECEIVED: +accept=*/* +connection=close +host=foo.bar.com +user-agent=curl/7.35.0 +x-forwarded-for=10.245.0.1 +x-forwarded-host=foo.bar.com +x-forwarded-proto=https + +$ curl https://104.154.30.67 -H 'Host:bar.baz.com' -k + + + +Welcome to nginx on Debian! 
+ +$ curl 104.154.30.67 +default backend - 404 +``` \ No newline at end of file diff --git a/examples/multi-tls/multi-tls.yaml b/examples/multi-tls/multi-tls.yaml new file mode 100644 index 000000000..f65feffaf --- /dev/null +++ b/examples/multi-tls/multi-tls.yaml @@ -0,0 +1,102 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: nginx +--- +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx +spec: + replicas: 1 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: gcr.io/google_containers/nginx + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: echoheaders + labels: + app: echoheaders +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: echoheaders +--- +apiVersion: v1 +kind: ReplicationController +metadata: + name: echoheaders +spec: + replicas: 1 + template: + metadata: + labels: + app: echoheaders + spec: + containers: + - name: echoheaders + image: gcr.io/google_containers/echoserver:1.4 + ports: + - containerPort: 8080 +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: foo-tls + namespace: default +spec: + tls: + - hosts: + - foo.bar.com + # This secret must exist beforehand + # The cert must also contain the subj-name foo.bar.com + # You can create it via: + # make keys secret SECRET=/tmp/foobar.json HOST=foo.bar.com NAME=foobar + # https://github.com/kubernetes/contrib/tree/master/ingress/controllers/gce/https_example + secretName: foobar + - hosts: + - bar.baz.com + # This secret must exist beforehand + # The cert must also contain the subj-name bar.baz.com + # You can create it via: + # make keys secret SECRET=/tmp/barbaz.json HOST=bar.baz.com NAME=barbaz + # https://github.com/kubernetes/contrib/tree/master/ingress/controllers/gce/https_example + secretName: barbaz + rules: + - host: foo.bar.com + http: + paths: + - backend: + serviceName: echoheaders + servicePort: 80 + path: / + - host: bar.baz.com + http: + paths: + - backend: + serviceName: nginx + servicePort: 80 + path: / \ No newline at end of file diff --git a/examples/rewrite/README.md b/examples/rewrite/README.md new file mode 100644 index 000000000..a878d52ea --- /dev/null +++ b/examples/rewrite/README.md @@ -0,0 +1,66 @@ +Create an Ingress rule with a rewrite annotation: +``` +$ echo " +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + annotations: + ingress.kubernetes.io/rewrite-target: / + name: rewrite + namespace: default +spec: + rules: + - host: rewrite.bar.com + http: + paths: + - backend: + serviceName: echoheaders + servicePort: 80 + path: /something +" | kubectl create -f - +``` + +Check the rewrite is working + +``` +$ curl -v http://172.17.4.99/something -H 'Host: rewrite.bar.com' +* Trying 172.17.4.99... 
+* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
+> GET /something HTTP/1.1
+> Host: rewrite.bar.com
+> User-Agent: curl/7.43.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Server: nginx/1.11.0
+< Date: Tue, 31 May 2016 16:07:31 GMT
+< Content-Type: text/plain
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+<
+CLIENT VALUES:
+client_address=10.2.56.9
+command=GET
+real path=/
+query=nil
+request_version=1.1
+request_uri=http://rewrite.bar.com:8080/
+
+SERVER VALUES:
+server_version=nginx: 1.9.11 - lua: 10001
+
+HEADERS RECEIVED:
+accept=*/*
+connection=close
+host=rewrite.bar.com
+user-agent=curl/7.43.0
+x-forwarded-for=10.2.56.1
+x-forwarded-host=rewrite.bar.com
+x-forwarded-port=80
+x-forwarded-proto=http
+x-real-ip=10.2.56.1
+BODY:
+* Connection #0 to host 172.17.4.99 left intact
+-no body in request-
+```
+
diff --git a/examples/scaling-deployment/nginx/README.md b/examples/scaling-deployment/nginx/README.md
new file mode 100644
index 000000000..e38174015
--- /dev/null
+++ b/examples/scaling-deployment/nginx/README.md
@@ -0,0 +1,41 @@
+# Deploying multiple Nginx Ingress Controllers
+
+This example demonstrates the deployment of multiple nginx ingress controllers.
+
+## Default Backend
+
+The default backend is a Service that handles all URL paths and hosts the nginx controller doesn't understand. Deploy the default-http-backend as follows:
+
+```console
+$ kubectl apply -f ../../deployment/nginx/default-backend.yaml
+deployment "default-http-backend" configured
+service "default-http-backend" configured
+
+$ kubectl -n kube-system get svc
+NAME                   CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
+default-http-backend   192.168.3.52   <none>        80/TCP    6m
+
+$ kubectl -n kube-system get po
+NAME                                    READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-wz6o3   1/1       Running   0          6m
+```
+
+## Ingress Deployment
+
+Deploy the Deployment of multiple controllers as follows:
+
+```console
+$ kubectl apply -f nginx-ingress-deployment.yaml
+deployment "nginx-ingress-controller" created
+
+$ kubectl -n kube-system get deployment
+NAME                       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
+default-http-backend       1         1         1            1           16m
+nginx-ingress-controller   2         2         2            2           24s
+
+$ kubectl -n kube-system get po
+NAME                                        READY     STATUS    RESTARTS   AGE
+default-http-backend-2657704409-wz6o3       1/1       Running   0          16m
+nginx-ingress-controller-3752011415-0qbi6   1/1       Running   0          39s
+nginx-ingress-controller-3752011415-vi8fq   1/1       Running   0          39s
+```
diff --git a/examples/scaling-deployment/nginx/nginx-ingress-deployment.yaml b/examples/scaling-deployment/nginx/nginx-ingress-deployment.yaml
new file mode 100644
index 000000000..f0ee65b31
--- /dev/null
+++ b/examples/scaling-deployment/nginx/nginx-ingress-deployment.yaml
@@ -0,0 +1,47 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nginx-ingress-controller
+  labels:
+    k8s-app: nginx-ingress-controller
+  namespace: kube-system
+spec:
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-ingress-controller
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
+        name: nginx-ingress-controller
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          timeoutSeconds: 1
+        ports:
+        - containerPort: 80
+          hostPort: 80
+        - containerPort: 443
+          hostPort: 443
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
diff --git a/examples/static-ip/gce.md b/examples/static-ip/gce.md
deleted file mode 100644
index 92f004160..000000000
--- a/examples/static-ip/gce.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Static IPs through the GCE Ingress controller
-
-Placeholder
diff --git a/examples/static-ip/gce/README.md b/examples/static-ip/gce/README.md
new file mode 100644
index 000000000..07917ac50
--- /dev/null
+++ b/examples/static-ip/gce/README.md
@@ -0,0 +1,129 @@
+# Static IPs
+
+This example demonstrates how to assign a [static IP](https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) to an Ingress on GCE.
+
+## Prerequisites
+
+You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example.
+You will also need to make sure your Ingress targets exactly one Ingress
+controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class),
+and that you have an ingress controller [running](/examples/deployment) in your cluster.
+
+## Acquiring a static IP
+
+In GCE, a static IP belongs to a given project until the owner decides to release
+it. If you create a static IP and assign it to an Ingress, deleting the Ingress
+or tearing down the GKE cluster *will not* delete the static IP. You can check
+the static IPs you have as follows:
+
+```console
+$ gcloud compute addresses list --global
+NAME     REGION  ADDRESS         STATUS
+test-ip          35.186.221.137  RESERVED
+
+$ gcloud compute addresses list
+NAME     REGION       ADDRESS         STATUS
+test-ip               35.186.221.137  RESERVED
+test-ip  us-central1  35.184.21.228   RESERVED
+```
+
+Note the difference between a regional and a global static IP. Only global
+static IPs will work with Ingress. If you don't already have one, you can
+create it:
+
+```console
+$ gcloud compute addresses create test-ip --global
+Created [https://www.googleapis.com/compute/v1/projects/kubernetesdev/global/addresses/test-ip].
+---
+address: 35.186.221.137
+creationTimestamp: '2017-01-31T10:32:29.889-08:00'
+description: ''
+id: '9221457935391876818'
+kind: compute#address
+name: test-ip
+selfLink: https://www.googleapis.com/compute/v1/projects/kubernetesdev/global/addresses/test-ip
+status: RESERVED
+```
+
+## Assigning a static IP to an Ingress
+
+You can now add the static IP from the previous step to an Ingress
+by specifying the `kubernetes.io/ingress.global-static-ip-name` annotation;
+the example yaml in this directory already has it set to `test-ip`:
+
+```console
+$ kubectl create -f gce-static-ip-ingress.yaml
+ingress "static-ip" created
+
+$ gcloud compute addresses list test-ip
+NAME     REGION       ADDRESS         STATUS
+test-ip               35.186.221.137  IN_USE
+test-ip  us-central1  35.184.21.228   RESERVED
+
+$ kubectl get ing
+NAME        HOSTS     ADDRESS          PORTS     AGE
+static-ip   *         35.186.221.137   80, 443   1m
+
+$ curl 35.186.221.137 -Lk
+CLIENT VALUES:
+client_address=10.180.1.1
+command=GET
+real path=/
+query=nil
+request_version=1.1
+request_uri=http://35.186.221.137:8080/
+...
+```
+
+## Retaining the static IP
+
+You can test retention by deleting the Ingress:
+
+```console
+$ kubectl delete -f gce-static-ip-ingress.yaml
+ingress "static-ip" deleted
+
+$ kubectl get ing
+No resources found.
+
+$ gcloud compute addresses list test-ip --global
+NAME     REGION  ADDRESS         STATUS
+test-ip          35.186.221.137  RESERVED
+```
+
+## Promote ephemeral to static IP
+
+If you simply create an HTTP Ingress resource, it gets an ephemeral IP:
+
+```console
+$ kubectl create -f gce-http-ingress.yaml
+ingress "http-ingress" created
+
+$ kubectl get ing
+NAME           HOSTS     ADDRESS         PORTS     AGE
+http-ingress   *         35.186.195.33   80        1h
+
+$ gcloud compute forwarding-rules list
+NAME                                            REGION  IP_ADDRESS     IP_PROTOCOL  TARGET
+k8s-fw-default-http-ingress--32658fa96c080068           35.186.195.33  TCP          k8s-tp-default-http-ingress--32658fa96c080068
+```
+
+Note that because this is an ephemeral IP, it won't show up in the output of
+`gcloud compute addresses list`.
+
+If you either directly create an Ingress with a TLS section, or modify an HTTP
+Ingress to have a TLS section, it gets a static IP.
+
+```console
+$ kubectl patch ing http-ingress -p '{"spec":{"tls":[{"secretName":"tls-secret"}]}}'
+"http-ingress" patched
+
+$ kubectl get ing
+NAME           HOSTS     ADDRESS         PORTS     AGE
+http-ingress   *         35.186.195.33   80, 443   1h
+
+$ gcloud compute addresses list
+NAME                                            REGION  ADDRESS        STATUS
+k8s-fw-default-http-ingress--32658fa96c080068           35.186.195.33  IN_USE
+```
+
diff --git a/examples/static-ip/gce/gce-http-ingress.yaml b/examples/static-ip/gce/gce-http-ingress.yaml
new file mode 100644
index 000000000..ca0e34ca5
--- /dev/null
+++ b/examples/static-ip/gce/gce-http-ingress.yaml
@@ -0,0 +1,12 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: http-ingress
+  annotations:
+    kubernetes.io/ingress.class: "gce"
+spec:
+  backend:
+    # This assumes http-svc exists and routes to healthy endpoints.
+    serviceName: http-svc
+    servicePort: 80
+
diff --git a/examples/static-ip/gce/gce-static-ip-ingress.yaml b/examples/static-ip/gce/gce-static-ip-ingress.yaml
new file mode 100644
index 000000000..7742b8705
--- /dev/null
+++ b/examples/static-ip/gce/gce-static-ip-ingress.yaml
@@ -0,0 +1,19 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: static-ip
+  # Assumes a global static ip with the same name exists.
+  # You can acquire a static IP by running
+  # gcloud compute addresses create test-ip --global
+  annotations:
+    kubernetes.io/ingress.global-static-ip-name: "test-ip"
+    kubernetes.io/ingress.class: "gce"
+spec:
+  tls:
+  # This assumes tls-secret exists.
+  - secretName: tls-secret
+  backend:
+    # This assumes http-svc exists and routes to healthy endpoints.
+    serviceName: http-svc
+    servicePort: 80
+
diff --git a/examples/static-ip/nginx/README.md b/examples/static-ip/nginx/README.md
new file mode 100644
index 000000000..67df63a57
--- /dev/null
+++ b/examples/static-ip/nginx/README.md
@@ -0,0 +1,119 @@
+# Static IPs
+
+
+This example demonstrates how to assign a static IP to an Ingress through
+the Nginx controller.
+
+## Prerequisites
+
+You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example.
+You will also need to make sure your Ingress targets exactly one Ingress
+controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class),
+and that you have an ingress controller [running](/examples/deployment) in your cluster.
+
+## Acquiring an IP
+
+Since instances of the nginx controller actually run on nodes in your cluster,
+by default nginx Ingresses will only get static IPs if your cloudprovider
+supports static IP assignments to nodes. On GKE/GCE for example, even though
+nodes get static IPs, the IPs are not retained across upgrades.
+
+To acquire a static IP for the nginx ingress controller, simply put it
+behind a Service of `Type=LoadBalancer`.
+
+First, create a loadbalancer Service and wait for it to acquire an IP:
+
+```console
+$ kubectl create -f static-ip-svc.yaml
+service "nginx-ingress-lb" created
+
+$ kubectl get svc nginx-ingress-lb
+NAME               CLUSTER-IP     EXTERNAL-IP       PORT(S)                      AGE
+nginx-ingress-lb   10.0.138.113   104.154.109.191   80:31457/TCP,443:32240/TCP   15m
+```
+
+Then, update the ingress controller so it adopts the static IP of the Service
+by passing the `--publish-service` flag (the example yaml used in the next step
+already has it set to "nginx-ingress-lb"):
+
+```console
+$ kubectl create -f nginx-ingress-controller.yaml
+deployment "nginx-ingress-controller" created
+```
+
+## Assigning the IP to an Ingress
+
+From here on, every Ingress created with the `ingress.class` annotation set to
+`nginx` will get the IP allocated in the previous step:
+
+```console
+$ kubectl create -f nginx-ingress.yaml
+ingress "nginx-ingress" created
+
+$ kubectl get ing nginx-ingress
+NAME            HOSTS     ADDRESS           PORTS     AGE
+nginx-ingress   *         104.154.109.191   80, 443   13m
+
+$ curl 104.154.109.191 -kL
+CLIENT VALUES:
+client_address=10.180.1.25
+command=GET
+real path=/
+query=nil
+request_version=1.1
+request_uri=http://104.154.109.191:8080/
+...
+```
+
+## Retaining the IP
+
+You can test retention by deleting the Ingress:
+
+```console
+$ kubectl delete ing nginx-ingress
+ingress "nginx-ingress" deleted
+
+$ kubectl create -f nginx-ingress.yaml
+ingress "nginx-ingress" created
+
+$ kubectl get ing nginx-ingress
+NAME            HOSTS     ADDRESS           PORTS     AGE
+nginx-ingress   *         104.154.109.191   80, 443   13m
+```
+
+Note that unlike the GCE Ingress, the same loadbalancer IP is shared amongst all
+Ingresses, because all requests are proxied through the same set of nginx
+controllers.
+
+## Promote ephemeral to static IP
+
+To promote the allocated IP to static, you can update the Service manifest:
+
+```console
+$ kubectl patch svc nginx-ingress-lb -p '{"spec": {"loadBalancerIP": "104.154.109.191"}}'
+"nginx-ingress-lb" patched
+```
+
+... and promote the IP to static (promotion works differently across cloudproviders;
+the example provided is for GKE/GCE):
+
+```console
+$ gcloud compute addresses create nginx-ingress-lb --addresses 104.154.109.191 --region us-central1
+Created [https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb].
+---
+address: 104.154.109.191
+creationTimestamp: '2017-01-31T16:34:50.089-08:00'
+description: ''
+id: '5208037144487826373'
+kind: compute#address
+name: nginx-ingress-lb
+region: us-central1
+selfLink: https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb
+status: IN_USE
+users:
+- us-central1/forwardingRules/a09f6913ae80e11e6a8c542010af0000
+```
+
+Now even if the Service is deleted, the IP will persist, so you can recreate the
+Service with `spec.loadBalancerIP` set to `104.154.109.191`.
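+
+As a sketch (assuming the `static-ip-svc.yaml` from this directory, which
+already pins `loadBalancerIP: 104.154.109.191`), the Service should come back
+on the same address:
+
+```console
+$ kubectl delete svc nginx-ingress-lb
+service "nginx-ingress-lb" deleted
+
+$ kubectl create -f static-ip-svc.yaml
+service "nginx-ingress-lb" created
+
+$ kubectl get svc nginx-ingress-lb -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+104.154.109.191
+```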
+ diff --git a/examples/static-ip/nginx/nginx-ingress-controller.yaml b/examples/static-ip/nginx/nginx-ingress-controller.yaml new file mode 100644 index 000000000..d6eb1d512 --- /dev/null +++ b/examples/static-ip/nginx/nginx-ingress-controller.yaml @@ -0,0 +1,52 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nginx-ingress-controller + labels: + k8s-app: nginx-ingress-controller +spec: + replicas: 1 + template: + metadata: + labels: + k8s-app: nginx-ingress-controller + spec: + # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration + # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host + # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used + # like with kubeadm + # hostNetwork: true + terminationGracePeriodSeconds: 60 + containers: + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2 + name: nginx-ingress-controller + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + ports: + - containerPort: 80 + hostPort: 80 + - containerPort: 443 + hostPort: 443 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - /nginx-ingress-controller + - --default-backend-service=$(POD_NAMESPACE)/default-http-backend + - --publish-service=$(POD_NAMESPACE)/nginx-ingress-lb diff --git a/examples/static-ip/nginx/nginx-ingress.yaml b/examples/static-ip/nginx/nginx-ingress.yaml new file mode 100644 index 000000000..6cdd81fc8 --- /dev/null +++ b/examples/static-ip/nginx/nginx-ingress.yaml @@ -0,0 +1,17 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: nginx-ingress + annotations: + kubernetes.io/ingress.class: "nginx" +spec: + tls: + # This assumes tls-secret exists. + - secretName: tls-secret + rules: + - http: + paths: + - backend: + # This assumes http-svc exists and routes to healthy endpoints. 
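+          # (No path is specified, so this backend receives traffic for all paths.)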
+ serviceName: http-svc + servicePort: 80 diff --git a/examples/static-ip/nginx/static-ip-svc.yaml b/examples/static-ip/nginx/static-ip-svc.yaml new file mode 100644 index 000000000..57d02d1ac --- /dev/null +++ b/examples/static-ip/nginx/static-ip-svc.yaml @@ -0,0 +1,23 @@ +# This is the backend service +apiVersion: v1 +kind: Service +metadata: + name: nginx-ingress-lb + annotations: + service.beta.kubernetes.io/external-traffic: OnlyLocal + labels: + app: nginx-ingress-lb +spec: + type: LoadBalancer + loadBalancerIP: 104.154.109.191 + ports: + - port: 80 + name: http + targetPort: 80 + - port: 443 + name: https + targetPort: 443 + selector: + # Selects nginx-ingress-controller pods + k8s-app: nginx-ingress-controller + diff --git a/examples/tls-termination/gce.md b/examples/tls-termination/gce/README.md similarity index 95% rename from examples/tls-termination/gce.md rename to examples/tls-termination/gce/README.md index ef2b99544..bc674d8a2 100644 --- a/examples/tls-termination/gce.md +++ b/examples/tls-termination/gce/README.md @@ -6,7 +6,8 @@ This example demonstrates how to terminate TLS through the GCE Ingress controlle You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example. You will also need to make sure you Ingress targets exactly one Ingress -controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class). +controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class), +and that you have an ingress controller [running](/examples/deployment) in your cluster. ## Deployment diff --git a/examples/tls-termination/gce/gce-tls-ingress.yaml b/examples/tls-termination/gce/gce-tls-ingress.yaml new file mode 100644 index 000000000..705a17d36 --- /dev/null +++ b/examples/tls-termination/gce/gce-tls-ingress.yaml @@ -0,0 +1,15 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: test + annotations: + kubernetes.io/ingress.class: "gce" +spec: + tls: + # This assumes tls-secret exists. + - secretName: tls-secret + backend: + # This assumes http-svc exists and routes to healthy endpoints. + serviceName: http-svc + servicePort: 80 + diff --git a/examples/tls-termination/haproxy/README.md b/examples/tls-termination/haproxy/README.md new file mode 100644 index 000000000..e019a00ee --- /dev/null +++ b/examples/tls-termination/haproxy/README.md @@ -0,0 +1,116 @@ +# HAProxy Ingress TLS termination + +## Prerequisites + +This document has the following prerequisites: + +* Deploy [HAProxy Ingress controller](/examples/deployment/haproxy), you should end up with controller, a sample web app and default TLS secret +* Create [*another* secret](/examples/PREREQUISITES.md#tls-certificates) named `foobar-ssl` and subject `'/CN=foo.bar'` + +As mentioned in the deployment instructions, you MUST turn down any existing +ingress controllers before running HAProxy Ingress. 
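+
+Following the same pattern used for `tls-secret` in the deployment
+instructions, the `foobar-ssl` secret can be created with, for example:
+
+```console
+$ openssl req \
+  -x509 -newkey rsa:2048 -nodes -days 365 \
+  -keyout tls.key -out tls.crt -subj '/CN=foo.bar'
+$ kubectl create secret tls foobar-ssl --cert=tls.crt --key=tls.key
+$ rm -v tls.crt tls.key
+```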
+ +## Using default TLS certificate + +Update ingress resource in order to add TLS termination to host `foo.bar`: + +```console +$ kubectl replace -f ingress-tls-default.yaml +``` + +The difference from the starting ingress resource: + +```console + metadata: + name: app + spec: ++ tls: ++ - hosts: ++ - foo.bar + rules: + - host: foo.bar + http: +``` + +Trying default backend: + +```console +$ curl -iL 172.17.4.99:30876 +HTTP/1.1 404 Not Found +Date: Tue, 07 Feb 2017 00:06:07 GMT +Content-Length: 21 +Content-Type: text/plain; charset=utf-8 + +default backend - 404 +``` + +Now telling the controller we are `foo.bar`: + +```console +$ curl -iL 172.17.4.99:30876 -H 'Host: foo.bar' +HTTP/1.1 302 Found +Cache-Control: no-cache +Content-length: 0 +Location: https://foo.bar/ +Connection: close +^C +``` + +Note the `Location` header - this would redirect us to the correct server. + +Checking the default certificate - change below `31692` to the TLS port: + +```console +$ openssl s_client -connect 172.17.4.99:31692 +... +subject=/CN=localhost +issuer=/CN=localhost +--- +``` + +... and `foo.bar` certificate: + +```console +$ openssl s_client -connect 172.17.4.99:31692 -servername foo.bar +... +subject=/CN=localhost +issuer=/CN=localhost +--- +``` + +## Using a new TLS certificate + +Now let's reference the new certificate to our domain. Note that secret +`foobar-ssl` should be created as described in the [prerequisites](#prerequisites) + +```console +$ kubectl replace -f ingress-tls-foobar.yaml +``` + +Here is the difference: + +```console + tls: + - hosts: + - foo.bar ++ secretName: foobar-ssl + rules: + - host: foo.bar + http: +``` + +Now `foo.bar` certificate should be used to terminate TLS: + +```console +$ openssl s_client -connect 172.17.4.99:31692 +... +subject=/CN=localhost +issuer=/CN=localhost +--- + +$ openssl s_client -connect 172.17.4.99:31692 -servername foo.bar +... +subject=/CN=foo.bar +issuer=/CN=foo.bar +--- +``` diff --git a/examples/tls-termination/haproxy/ingress-tls-default.yaml b/examples/tls-termination/haproxy/ingress-tls-default.yaml new file mode 100644 index 000000000..7c5034645 --- /dev/null +++ b/examples/tls-termination/haproxy/ingress-tls-default.yaml @@ -0,0 +1,16 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: app +spec: + tls: + - hosts: + - foo.bar + rules: + - host: foo.bar + http: + paths: + - path: / + backend: + serviceName: http-svc + servicePort: 8080 diff --git a/examples/tls-termination/haproxy/ingress-tls-foobar.yaml b/examples/tls-termination/haproxy/ingress-tls-foobar.yaml new file mode 100644 index 000000000..e15f9428b --- /dev/null +++ b/examples/tls-termination/haproxy/ingress-tls-foobar.yaml @@ -0,0 +1,17 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: app +spec: + tls: + - hosts: + - foo.bar + secretName: foobar-ssl + rules: + - host: foo.bar + http: + paths: + - path: / + backend: + serviceName: http-svc + servicePort: 8080 diff --git a/examples/tls-termination/nginx.md b/examples/tls-termination/nginx/README.md similarity index 94% rename from examples/tls-termination/nginx.md rename to examples/tls-termination/nginx/README.md index b116cad09..8ad3ac142 100644 --- a/examples/tls-termination/nginx.md +++ b/examples/tls-termination/nginx/README.md @@ -6,7 +6,8 @@ This example demonstrates how to terminate TLS through the nginx Ingress control You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example. 
You will also need to make sure your Ingress targets exactly one Ingress
-controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class).
+controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class),
+and that you have an ingress controller [running](/examples/deployment) in your cluster.

## Deployment
diff --git a/examples/tls-termination/nginx-tls-ingress.yaml b/examples/tls-termination/nginx/nginx-tls-ingress.yaml
similarity index 100%
rename from examples/tls-termination/nginx-tls-ingress.yaml
rename to examples/tls-termination/nginx/nginx-tls-ingress.yaml
diff --git a/images/OWNERS b/images/OWNERS
new file mode 100644
index 000000000..79db51eae
--- /dev/null
+++ b/images/OWNERS
@@ -0,0 +1,5 @@
+approvers:
+- aledbf
+reviewers:
+- bprashanth
+- aledbf
diff --git a/images/nginx-slim/Dockerfile b/images/nginx-slim/Dockerfile
new file mode 100644
index 000000000..5efd6b40b
--- /dev/null
+++ b/images/nginx-slim/Dockerfile
@@ -0,0 +1,29 @@
+# Copyright 2015 The Kubernetes Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+FROM gcr.io/google_containers/ubuntu-slim:0.7
+
+COPY build.sh /tmp
+
+RUN /tmp/build.sh
+
+# Create symlinks to redirect nginx logs to stdout and stderr docker log collector
+# This only works if nginx is started with CMD or ENTRYPOINT
+RUN ln -sf /dev/stdout /var/log/nginx/access.log
+RUN ln -sf /dev/stderr /var/log/nginx/error.log
+
+EXPOSE 80 443
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/images/nginx-slim/Makefile b/images/nginx-slim/Makefile
new file mode 100644
index 000000000..4b52337e1
--- /dev/null
+++ b/images/nginx-slim/Makefile
@@ -0,0 +1,14 @@
+all: push
+
+# 0.0.0 shouldn't clobber any released builds
+TAG = 0.14
+PREFIX = gcr.io/google_containers/nginx-slim
+
+container:
+	docker build --pull -t $(PREFIX):$(TAG) .
+
+push: container
+	gcloud docker -- push $(PREFIX):$(TAG)
+
+clean:
+	docker rmi -f $(PREFIX):$(TAG) || true
diff --git a/images/nginx-slim/README.md b/images/nginx-slim/README.md
new file mode 100644
index 000000000..8d93e3fa4
--- /dev/null
+++ b/images/nginx-slim/README.md
@@ -0,0 +1,24 @@
+
+nginx 1.11.x base image using [ubuntu-slim](https://github.com/kubernetes/contrib/tree/master/images/ubuntu-slim)
+
+nginx [engine x] is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP proxy server.
+
+This custom nginx image contains:
+- [lua](https://github.com/openresty/lua-nginx-module) support
+- [stream](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) tcp support for upstreams
+- nginx stats [nginx-module-vts](https://github.com/vozlt/nginx-module-vts)
+- [Dynamic TLS record sizing](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/)
+
+
+**How to use this image:**
+This image provides a default configuration file with no backend servers.
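+
+As an illustrative sketch (this file is not shipped with the image), a minimal
+configuration you might mount over the default could be written as:
+
+```console
+$ cat > nginx.conf <<'EOF'
+events {}
+http {
+  server {
+    listen 80;
+    return 200 'hello from nginx-slim\n';
+  }
+}
+EOF
+```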
+ +*Using docker* +``` +$ docker run -v /some/nginx.con:/etc/nginx/nginx.conf:ro gcr.io/google_containers/nginx-slim:0.12 +``` + +*Creating a replication controller* +``` +$ kubectl create -f ./rc.yaml +``` diff --git a/images/nginx-slim/build.sh b/images/nginx-slim/build.sh new file mode 100755 index 000000000..2236e1369 --- /dev/null +++ b/images/nginx-slim/build.sh @@ -0,0 +1,223 @@ +#!/bin/sh + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e + +export NGINX_VERSION=1.11.10 +export NDK_VERSION=0.3.0 +export VTS_VERSION=0.1.11 +export SETMISC_VERSION=0.31 +export LUA_VERSION=0.10.7 +export STICKY_SESSIONS_VERSION=08a395c66e42 +export LUA_CJSON_VERSION=2.1.0.4 +export LUA_RESTY_HTTP_VERSION=0.07 +export LUA_UPSTREAM_VERSION=0.06 +export MORE_HEADERS_VERSION=0.32 +export NGINX_DIGEST_AUTH=7955af9c77598c697ac292811914ce1e2b3b824c +export NGINX_SUBSTITUTIONS=bc58cb11844bc42735bbaef7085ea86ace46d05b + +export BUILD_PATH=/tmp/build + +get_src() +{ + hash="$1" + url="$2" + f=$(basename "$url") + + curl -sSL "$url" -o "$f" + echo "$hash $f" | sha256sum -c - || exit 10 + tar xzf "$f" + rm -rf "$f" +} + +mkdir "$BUILD_PATH" +cd "$BUILD_PATH" + +# install required packages to build +apt-get update && apt-get install --no-install-recommends -y \ + bash \ + build-essential \ + curl ca-certificates \ + libgeoip1 \ + libgeoip-dev \ + patch \ + libpcre3 \ + libpcre3-dev \ + libssl-dev \ + zlib1g \ + zlib1g-dev \ + libaio1 \ + libaio-dev \ + luajit \ + openssl \ + libluajit-5.1 \ + libluajit-5.1-dev \ + linux-headers-generic || exit 1 + +# download, verify and extract the source files +get_src 778b3cabb07633f754cd9dee32fc8e22582bce22bfa407be76a806abd935533d \ + "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" + +get_src 88e05a99a8a7419066f5ae75966fb1efc409bad4522d14986da074554ae61619 \ + "https://github.com/simpl/ngx_devel_kit/archive/v$NDK_VERSION.tar.gz" + +get_src 97946a68937b50ab8637e1a90a13198fe376d801dc3e7447052e43c28e9ee7de \ + "https://github.com/openresty/set-misc-nginx-module/archive/v$SETMISC_VERSION.tar.gz" + +get_src 31db853251a631a6b6a0b96b10806c9c32eda3c3d08fe46a38ff944b22dba636 \ + "https://github.com/vozlt/nginx-module-vts/archive/v$VTS_VERSION.tar.gz" + +get_src c21c8937dcdd6fc2b6a955f929e3f4d1388610f47180e60126e6dcab06786f77 \ + "https://github.com/openresty/lua-nginx-module/archive/v$LUA_VERSION.tar.gz" + +get_src 5417991b6db4d46383da2d18f2fd46b93fafcebfe87ba87f7cfeac4c9bcb0224 \ + "https://github.com/openresty/lua-cjson/archive/$LUA_CJSON_VERSION.tar.gz" + +get_src 1c6aa06c9955397c94e9c3e0c0fba4e2704e85bee77b4512fb54ae7c25d58d86 \ + "https://github.com/pintsized/lua-resty-http/archive/v$LUA_RESTY_HTTP_VERSION.tar.gz" + +get_src c6d9dab8ea1fc997031007e2e8f47cced01417e203cd88d53a9fe9f6ae138720 \ + "https://github.com/openresty/headers-more-nginx-module/archive/v$MORE_HEADERS_VERSION.tar.gz" + +get_src 55475fe4f9e4b5220761269ccf0069ebb1ded61d7e7888f9c785c651cff3d141 \ + 
"https://github.com/openresty/lua-upstream-nginx-module/archive/v$LUA_UPSTREAM_VERSION.tar.gz" + +get_src 53e440737ed1aff1f09fae150219a45f16add0c8d6e84546cb7d80f73ebffd90 \ + "https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng/get/$STICKY_SESSIONS_VERSION.tar.gz" + +get_src 9b1d0075df787338bb607f14925886249bda60b6b3156713923d5d59e99a708b \ + "https://github.com/atomx/nginx-http-auth-digest/archive/$NGINX_DIGEST_AUTH.tar.gz" + +get_src 8eabbcd5950fdcc718bb0ef9165206c2ed60f67cd9da553d7bc3e6fe4e338461 \ + "https://github.com/yaoweibin/ngx_http_substitutions_filter_module/archive/$NGINX_SUBSTITUTIONS.tar.gz" + + +#https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/ +curl -sSL -o nginx__dynamic_tls_records.patch https://raw.githubusercontent.com/cloudflare/sslconfig/master/patches/nginx__1.11.5_dynamic_tls_records.patch + +# build nginx +cd "$BUILD_PATH/nginx-$NGINX_VERSION" + +echo "Applying tls nginx patches..." +patch -p1 < $BUILD_PATH/nginx__dynamic_tls_records.patch + +./configure \ + --prefix=/usr/share/nginx \ + --conf-path=/etc/nginx/nginx.conf \ + --http-log-path=/var/log/nginx/access.log \ + --error-log-path=/var/log/nginx/error.log \ + --lock-path=/var/lock/nginx.lock \ + --pid-path=/run/nginx.pid \ + --http-client-body-temp-path=/var/lib/nginx/body \ + --http-fastcgi-temp-path=/var/lib/nginx/fastcgi \ + --http-proxy-temp-path=/var/lib/nginx/proxy \ + --http-scgi-temp-path=/var/lib/nginx/scgi \ + --http-uwsgi-temp-path=/var/lib/nginx/uwsgi \ + --with-debug \ + --with-pcre-jit \ + --with-http_ssl_module \ + --with-http_stub_status_module \ + --with-http_realip_module \ + --with-http_auth_request_module \ + --with-http_addition_module \ + --with-http_dav_module \ + --with-http_geoip_module \ + --with-http_gzip_static_module \ + --with-http_sub_module \ + --with-http_v2_module \ + --with-stream \ + --with-stream_ssl_module \ + --with-stream_ssl_preread_module \ + --with-threads \ + --with-file-aio \ + --without-mail_pop3_module \ + --without-mail_smtp_module \ + --without-mail_imap_module \ + --without-http_uwsgi_module \ + --without-http_scgi_module \ + --with-cc-opt='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic' \ + --add-module="$BUILD_PATH/ngx_devel_kit-$NDK_VERSION" \ + --add-module="$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION" \ + --add-module="$BUILD_PATH/nginx-module-vts-$VTS_VERSION" \ + --add-module="$BUILD_PATH/lua-nginx-module-$LUA_VERSION" \ + --add-module="$BUILD_PATH/headers-more-nginx-module-$MORE_HEADERS_VERSION" \ + --add-module="$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION" \ + --add-module="$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH" \ + --add-module="$BUILD_PATH/ngx_http_substitutions_filter_module-$NGINX_SUBSTITUTIONS" \ + --add-module="$BUILD_PATH/lua-upstream-nginx-module-$LUA_UPSTREAM_VERSION" || exit 1 \ + && make || exit 1 \ + && make install || exit 1 + +echo "Installing CJSON module" +cd "$BUILD_PATH/lua-cjson-$LUA_CJSON_VERSION" +make LUA_INCLUDE_DIR=/usr/include/luajit-2.0 && make install + +echo "Installing lua-resty-http module" +# copy lua module +cd "$BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION" +sed -i 's/resty.http_headers/http_headers/' $BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION/lib/resty/http.lua +cp $BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION/lib/resty/http.lua /usr/local/lib/lua/5.1 +cp $BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION/lib/resty/http_headers.lua 
+
+echo "Cleaning..."
+
+cd /
+
+apt-mark unmarkauto \
+  bash \
+  curl ca-certificates \
+  libgeoip1 \
+  libpcre3 \
+  zlib1g \
+  libaio1 \
+  luajit \
+  libluajit-5.1-2 \
+  xz-utils \
+  geoip-bin \
+  openssl
+
+apt-get remove -y --purge \
+  build-essential \
+  gcc-5 \
+  cpp-5 \
+  libgeoip-dev \
+  libpcre3-dev \
+  libssl-dev \
+  zlib1g-dev \
+  libaio-dev \
+  libluajit-5.1-dev \
+  linux-libc-dev \
+  perl-modules-5.22 \
+  linux-headers-generic
+
+apt-get autoremove -y
+
+mkdir -p /var/lib/nginx/body /usr/share/nginx/html
+
+mv /usr/share/nginx/sbin/nginx /usr/sbin
+
+rm -rf "$BUILD_PATH"
+rm -Rf /usr/share/man /usr/share/doc
+rm -rf /tmp/* /var/tmp/*
+rm -rf /var/lib/apt/lists/*
+rm -rf /var/cache/apt/archives/*
+
+# Download GeoIP databases
+curl -sSL -o /etc/nginx/GeoIP.dat.gz http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz \
+  && curl -sSL -o /etc/nginx/GeoLiteCity.dat.gz http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz \
+  && gunzip /etc/nginx/GeoIP.dat.gz \
+  && gunzip /etc/nginx/GeoLiteCity.dat.gz
diff --git a/images/nginx-slim/rc.yaml b/images/nginx-slim/rc.yaml
new file mode 100644
index 000000000..248308d0f
--- /dev/null
+++ b/images/nginx-slim/rc.yaml
@@ -0,0 +1,34 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginxslimsvc
+  labels:
+    app: nginxslim
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    protocol: TCP
+    name: http
+  selector:
+    app: nginxslim
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nginxslim
+spec:
+  replicas: 1
+  selector:
+    app: nginxslim
+  template:
+    metadata:
+      labels:
+        app: nginxslim
+      name: frontend
+    spec:
+      containers:
+      - name: nginxslim
+        image: gcr.io/google_containers/nginx-slim:0.12
+        ports:
+        - containerPort: 80
diff --git a/images/ubuntu-slim/Dockerfile b/images/ubuntu-slim/Dockerfile
new file mode 100644
index 000000000..05b6e8dd8
--- /dev/null
+++ b/images/ubuntu-slim/Dockerfile
@@ -0,0 +1,5 @@
+FROM scratch
+
+ADD rootfs.tar /
+
+CMD ["/bin/bash"]
diff --git a/images/ubuntu-slim/Dockerfile.build b/images/ubuntu-slim/Dockerfile.build
new file mode 100644
index 000000000..20949f7d6
--- /dev/null
+++ b/images/ubuntu-slim/Dockerfile.build
@@ -0,0 +1,53 @@
+FROM ubuntu:16.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+COPY excludes /etc/dpkg/dpkg.cfg.d/excludes
+
+RUN apt-get update \
+  && apt-get dist-upgrade -y
+
+# no-op script removes the need for systemd-sysv
+COPY runlevel /sbin/runlevel
+
+# hold required packages to avoid breaking subsequent package installation
+RUN apt-mark hold apt gnupg adduser passwd libsemanage1
+
+# dpkg --get-selections | grep -v deinstall
+RUN echo "Yes, do as I say!" | apt-get purge \
+    e2fslibs \
+    libcap2-bin \
+    libkmod2 \
+    libmount1 \
+    libncursesw5 \
+    libprocps4 \
+    libsmartcols1 \
+    libudev1 \
+    ncurses-base \
+    ncurses-bin \
+    locales \
+    tzdata
+
+# cleanup
+RUN apt-get autoremove -y && \
+    apt-get clean -y && \
+    tar -czf /usr/share/copyrights.tar.gz /usr/share/common-licenses /usr/share/doc/*/copyright && \
+    rm -rf \
+        /usr/share/doc \
+        /usr/share/man \
+        /usr/share/info \
+        /usr/share/locale \
+        /var/lib/apt/lists/* \
+        /var/log/* \
+        /var/cache/debconf/* \
+        /usr/share/common-licenses* \
+        ~/.bashrc \
+        /etc/systemd \
+        /lib/lsb \
+        /lib/udev \
+        /usr/lib/x86_64-linux-gnu/gconv/IBM* \
+        /usr/lib/x86_64-linux-gnu/gconv/EBC* && \
+    mkdir -p /usr/share/man/man1 /usr/share/man/man2 \
+        /usr/share/man/man3 /usr/share/man/man4 \
+        /usr/share/man/man5 /usr/share/man/man6 \
+        /usr/share/man/man7 /usr/share/man/man8
diff --git a/images/ubuntu-slim/Makefile b/images/ubuntu-slim/Makefile
new file mode 100755
index 000000000..07fd5c88f
--- /dev/null
+++ b/images/ubuntu-slim/Makefile
@@ -0,0 +1,22 @@
+all: push
+
+TAG ?= 0.7
+PREFIX ?= gcr.io/google_containers/ubuntu-slim
+BUILD_IMAGE ?= ubuntu-build
+TAR_FILE ?= rootfs.tar
+PUSH_TOOL ?= gcloud
+
+container: clean
+	docker build --pull -t $(BUILD_IMAGE) -f Dockerfile.build .
+	docker create --name $(BUILD_IMAGE) $(BUILD_IMAGE)
+	docker export $(BUILD_IMAGE) > $(TAR_FILE)
+	docker build --pull -t $(PREFIX):$(TAG) .
+
+push: container
+	$(PUSH_TOOL) docker push $(PREFIX):$(TAG)
+
+clean:
+	docker rmi -f $(PREFIX):$(TAG) || true
+	docker rmi -f $(BUILD_IMAGE) || true
+	docker rm -f $(BUILD_IMAGE) || true
+	rm -f $(TAR_FILE)
diff --git a/images/ubuntu-slim/README.md b/images/ubuntu-slim/README.md
new file mode 100644
index 000000000..c41e70f72
--- /dev/null
+++ b/images/ubuntu-slim/README.md
@@ -0,0 +1,26 @@
+
+Small Ubuntu 16.04 docker image
+
+The size of this image is ~44MB (less than half the size of `ubuntu:16.04`).
+This is achieved by removing packages that are not required in a container:
+- e2fslibs
+- e2fsprogs
+- init
+- initscripts
+- libcap2-bin
+- libcryptsetup4
+- libdevmapper1.02.1
+- libkmod2
+- libmount1
+- libncursesw5
+- libprocps4
+- libsmartcols1
+- libudev1
+- mount
+- ncurses-base
+- ncurses-bin
+- procps
+- systemd
+- systemd-sysv
+- tzdata
+- util-linux
diff --git a/images/ubuntu-slim/excludes b/images/ubuntu-slim/excludes
new file mode 100644
index 000000000..d5af11a9d
--- /dev/null
+++ b/images/ubuntu-slim/excludes
@@ -0,0 +1,10 @@
+path-exclude /usr/share/doc/*
+path-include /usr/share/doc/*/copyright
+path-exclude /usr/share/man/*
+path-exclude /usr/share/groff/*
+path-exclude /usr/share/info/*
+path-exclude /usr/share/locale/*
+path-include /usr/share/locale/en_US*
+path-include /usr/share/locale/locale.alias
+path-exclude /usr/share/i18n/locales/*
+path-include /usr/share/i18n/locales/en_US*
diff --git a/images/ubuntu-slim/runlevel b/images/ubuntu-slim/runlevel
new file mode 100755
index 000000000..c52d3c26b
--- /dev/null
+++ b/images/ubuntu-slim/runlevel
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exit 0
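
For reference, a sketch of how the two-stage ubuntu-slim build above can be exercised locally with the Makefile (assumes Docker is installed; the final `echo` smoke test is illustrative only):
```
$ cd images/ubuntu-slim
$ make container   # builds ubuntu-build via Dockerfile.build, exports rootfs.tar, then builds the scratch-based image
$ docker run --rm gcr.io/google_containers/ubuntu-slim:0.7 /bin/bash -c 'echo OK'
```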