commit 7ffa0ae265
1937 changed files with 388997 additions and 395568 deletions
45
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,45 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->

**Is this a request for help?** (If yes, you should use our troubleshooting guide and community support channels, see https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/.):

**What keywords did you search in NGINX Ingress controller issues before filing this one?** (If you have found any duplicates, you should instead reply there.):

---

**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):

<!--
If this is a BUG REPORT, please:
  - Fill in as much of the template below as you can. If you leave out
    information, we can't help you as well.

If this is a FEATURE REQUEST, please:
  - Describe *in detail* the feature/behavior/change you'd like to see.

In both cases, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->

**Kubernetes version** (use `kubectl version`):

**Environment**:

- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release):
- **Kernel** (e.g. `uname -a`):
- **Install tools**:
- **Others**:

**What happened**:

**What you expected to happen**:

**How to reproduce it** (as minimally and precisely as possible):

**Anything else we need to know**:
11
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,11 @@
<!-- Thanks for sending a pull request! Here are some tips for you:
1. If this is your first time, read our contributor guidelines https://git.k8s.io/community/contributors/devel/pull-requests.md#the-pr-submit-process and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
2. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/devel/pull-requests.md#best-practices-for-faster-reviews
3. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/devel/pull-requests.md#write-release-notes-if-needed
-->

**What this PR does / why we need it**:

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:
@@ -11,7 +11,7 @@ notifications:
go:
  - 1.9

-go_import_path: k8s.io/ingress
+go_import_path: k8s.io/ingress-nginx

jobs:
  include:
1429
Godeps/Godeps.json
generated
File diff suppressed because it is too large
5
Godeps/Readme
generated
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
405
Gopkg.lock
generated
Normal file
|
@@ -0,0 +1,405 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/PuerkitoBio/purell"
|
||||
packages = ["."]
|
||||
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/PuerkitoBio/urlesc"
|
||||
packages = ["."]
|
||||
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/armon/go-proxyproto"
|
||||
packages = ["."]
|
||||
revision = "48572f11356f1843b694f21a290d4f1006bc5e47"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
|
||||
version = "v3.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/distribution"
|
||||
packages = ["digestset","reference"]
|
||||
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/docker/spdystream"
|
||||
packages = [".","spdy"]
|
||||
revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/emicklei/go-restful"
|
||||
packages = [".","log"]
|
||||
revision = "5741799b275a3c4a5a9623a993576d7545cf7b5c"
|
||||
version = "v2.4.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/emicklei/go-restful-swagger12"
|
||||
packages = ["."]
|
||||
revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/fullsailor/pkcs7"
|
||||
packages = ["."]
|
||||
revision = "a009d8d7de53d9503c797cb8ec66fa3b21eed209"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ghodss/yaml"
|
||||
packages = ["."]
|
||||
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/jsonpointer"
|
||||
packages = ["."]
|
||||
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/jsonreference"
|
||||
packages = ["."]
|
||||
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/spec"
|
||||
packages = ["."]
|
||||
revision = "48c2a7185575f9103a5a3863eff950bb776899d2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/swag"
|
||||
packages = ["."]
|
||||
revision = "f3f9494671f93fcff853e3c6e9e948b3eb71e590"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = ["gogoproto","proto","protoc-gen-gogo/descriptor","sortkeys"]
|
||||
revision = "100ba4e885062801d56799d78530b73b178a78f3"
|
||||
version = "v0.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/glog"
|
||||
packages = ["."]
|
||||
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/groupcache"
|
||||
packages = ["lru"]
|
||||
revision = "b710c8433bd175204919eb38776e944233235d03"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
|
||||
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/gofuzz"
|
||||
packages = ["."]
|
||||
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/googleapis/gnostic"
|
||||
packages = ["OpenAPIv2","compiler","extensions"]
|
||||
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [".","diskcache"]
|
||||
revision = "c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [".","simplelru"]
|
||||
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/howeyc/gopass"
|
||||
packages = ["."]
|
||||
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/imdario/mergo"
|
||||
packages = ["."]
|
||||
revision = "3e95a51e0639b4cf372f2ccf74c86749d747fbdc"
|
||||
version = "0.2.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/json-iterator/go"
|
||||
packages = ["."]
|
||||
revision = "6ed27152e0428abfde127acb33b08b03a1e67cac"
|
||||
version = "1.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/juju/ratelimit"
|
||||
packages = ["."]
|
||||
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/kr/pty"
|
||||
packages = ["."]
|
||||
revision = "95d05c1eef33a45bd58676b6ce28d105839b8d0b"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kylelemons/godebug"
|
||||
packages = ["diff","pretty"]
|
||||
revision = "d65d576e9348f5982d7f6d83682b694e731a45c6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mailru/easyjson"
|
||||
packages = ["buffer","jlexer","jwriter"]
|
||||
revision = "2a92e673c9a6302dd05c3a691ae1f24aef46457d"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/go-ps"
|
||||
packages = ["."]
|
||||
revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
revision = "d0303fe809921458f417bcf828397a65db30a7e4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ncabatoff/process-exporter"
|
||||
packages = [".","proc"]
|
||||
revision = "5917bc766b95a1fa3c2ae85340f4de02a6b7e15e"
|
||||
source = "github.com/aledbf/process-exporter"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opencontainers/go-digest"
|
||||
packages = ["."]
|
||||
revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf"
|
||||
version = "v1.0.0-rc1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/paultag/sniff"
|
||||
packages = ["parser"]
|
||||
revision = "87325c3dddf408cfb71f5044873d34ac426d5a59"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pborman/uuid"
|
||||
packages = ["."]
|
||||
revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
|
||||
version = "v1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = ["prometheus","prometheus/promhttp"]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "6f3806018612930941127f2a7c6c453ba2c527d2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
|
||||
revision = "1bab55dd05dbff384524a6a1c99006d9eb5f139b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [".","xfs"]
|
||||
revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/zakjan/cert-chain-resolver"
|
||||
packages = ["certUtil"]
|
||||
revision = "c222fb53d84f5c835aab8027b5d422d4089f9d29"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
|
||||
revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix","windows"]
|
||||
revision = "ebfc5b4631820b793c9010c87fd8fef0f39eb082"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
|
||||
revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "f676e0f3ac6395ff1a529ae59a6670878a8371a6"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
|
||||
revision = "f92cdcd7dcdc69e81b2d7b338479a19a8723cfa3"
|
||||
version = "v1.6.0"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
packages = ["."]
|
||||
revision = "629574ca2a5df945712d3079857300b5e4da0236"
|
||||
version = "v1.4.2"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/go-playground/pool.v3"
|
||||
packages = ["."]
|
||||
revision = "e73cd3a5ded835540c5cf4778488579c5b357d68"
|
||||
version = "v3.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/inf.v0"
|
||||
packages = ["."]
|
||||
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
|
||||
version = "v0.9.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/api"
|
||||
packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"]
|
||||
revision = "81aa34336d28aadc3a8e8da7dfd9258c5157e5e4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
packages = ["pkg/features"]
|
||||
revision = "a5bbfd114a9b122acd741c61d88c84812375d9e1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/api/validation","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1/validation","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/rand","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
|
||||
revision = "3b05bbfa0a45413bfa184edbf9af617e277962fb"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apiserver"
|
||||
packages = ["pkg/authentication/authenticator","pkg/authentication/serviceaccount","pkg/authentication/user","pkg/features","pkg/server/healthz","pkg/util/feature"]
|
||||
revision = "c1e53d745d0fe45bf7d5d44697e6eface25fceca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/client-go"
|
||||
packages = ["discovery","discovery/fake","kubernetes","kubernetes/fake","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1alpha1/fake","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta1/fake","kubernetes/typed/apps/v1beta2","kubernetes/typed/apps/v1beta2/fake","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1/fake","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authentication/v1beta1/fake","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1/fake","kubernetes/typed/authorization/v1beta1","kubernetes/typed/authorization/v1beta1/fake","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v1/fake","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/autoscaling/v2beta1/fake","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1/fake","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v1beta1/fake","kubernetes/typed/batch/v2alpha1","kubernetes/typed/batch/v2alpha1/fake","kubernetes/typed/certificates/v1beta1","kubernetes/typed/certificates/v1beta1/fake","kubernetes/typed/core/v1","kubernetes/typed/core/v1/fake","kubernetes/typed/extensions/v1beta1","kubernetes/typed/extensions/v1beta1/fake","kubernetes/typed/networking/v1","kubernetes/typed/networking/v1/fake","kubernetes/typed/policy/v1beta1","kubernetes/typed/policy/v1beta1/fake","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1/fake","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1alpha1/fake","kubernetes/typed/rbac/v1beta1","kubernetes/typed/rbac/v1beta1/fake","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/scheduling/v1alpha1/fake","kubernetes/typed/settings/v1alpha1","kubernetes/typed/settings/v1alpha1/fake","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1/fake","kubernetes/typed/storage/v1beta1","kubernetes/typed/storage/v1beta1/fake","pkg/version","rest","rest/watch","testing","tools/auth","tools/cache","tools/cache/testing","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/leaderelection","tools/leaderelection/resourcelock","tools/metrics","tools/pager","tools/record","tools/reference","tools/remotecommand","transport","transport/spdy","util/cert","util/cert/triple","util/exec","util/flowcontrol","util/homedir","util/integer","util/retry","util/workqueue"]
|
||||
revision = "82aa063804cf055e16e8911250f888bc216e8b61"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/kube-openapi"
|
||||
packages = ["pkg/common"]
|
||||
revision = "abfc5fbe1cf87ee697db107fdfd24c32fe4397a8"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/kubernetes"
|
||||
packages = ["pkg/api","pkg/api/helper","pkg/api/install","pkg/api/service","pkg/api/util","pkg/api/v1","pkg/api/v1/helper","pkg/api/v1/pod","pkg/api/validation","pkg/apis/extensions","pkg/apis/networking","pkg/capabilities","pkg/cloudprovider","pkg/controller","pkg/features","pkg/kubelet/apis","pkg/kubelet/apis/cri/v1alpha1/runtime","pkg/kubelet/container","pkg/kubelet/types","pkg/kubelet/util/format","pkg/kubelet/util/ioutils","pkg/kubelet/util/sliceutils","pkg/security/apparmor","pkg/serviceaccount","pkg/util/file","pkg/util/hash","pkg/util/io","pkg/util/mount","pkg/util/net/sets","pkg/util/parsers","pkg/util/pointer","pkg/util/sysctl","pkg/util/taints","pkg/volume","pkg/volume/util","third_party/forked/golang/expansion"]
|
||||
revision = "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/utils"
|
||||
packages = ["exec"]
|
||||
revision = "4fe312863be2155a7b68acd2aff1c9221b24e68c"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "4511b8e7ec7b35d8752f919dfe51b39a07852a98de8d765ce7f2512032450cde"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
110
Gopkg.toml
Normal file
|
@@ -0,0 +1,110 @@
|
|||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
[[override]]
|
||||
name = "github.com/docker/distribution"
|
||||
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/opencontainers/go-digest"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/armon/go-proxyproto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/glog"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/imdario/mergo"
|
||||
version = "0.2.2"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/kylelemons/godebug"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/go-ps"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/ncabatoff/process-exporter"
|
||||
source = "github.com/aledbf/process-exporter"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/paultag/sniff"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pborman/uuid"
|
||||
version = "1.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/errors"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/pflag"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/zakjan/cert-chain-resolver"
|
||||
version = "1.0.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
version = "1.4.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/go-playground/pool.v3"
|
||||
version = "3.1.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "k8s.io/api"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apimachinery"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apiserver"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "k8s.io/client-go"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kubernetes"
|
||||
version = "1.8.0"
|
166
Makefile
|
@@ -1,58 +1,142 @@
|
|||
all: fmt lint vet
|
||||
all: push
|
||||
|
||||
BUILDTAGS=
|
||||
|
||||
# base package. It contains the common and backends code
|
||||
PKG := "k8s.io/ingress"
|
||||
# Use the 0.0 tag for testing, it shouldn't clobber any release builds
|
||||
TAG?=0.9.0-beta.15
|
||||
REGISTRY?=gcr.io/google_containers
|
||||
GOOS?=linux
|
||||
DOCKER?=gcloud docker --
|
||||
SED_I?=sed -i
|
||||
GOHOSTOS ?= $(shell go env GOHOSTOS)
|
||||
|
||||
GO_LIST_FILES=$(shell go list ${PKG}/... | grep -v vendor | grep -v -e "test/e2e")
|
||||
ifeq ($(GOHOSTOS),darwin)
|
||||
SED_I=sed -i ''
|
||||
endif
|
||||
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
@go list -f '{{if len .TestGoFiles}}"gofmt -s -l {{.Dir}}"{{end}}' ${GO_LIST_FILES} | xargs -L 1 sh -c
|
||||
REPO_INFO=$(shell git config --get remote.origin.url)
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
@go list -f '{{if len .TestGoFiles}}"golint -min_confidence=0.85 {{.Dir}}/..."{{end}}' ${GO_LIST_FILES} | xargs -L 1 sh -c
|
||||
ifndef COMMIT
|
||||
COMMIT := git-$(shell git rev-parse --short HEAD)
|
||||
endif
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@go test -v -race -tags "$(BUILDTAGS) cgo" ${GO_LIST_FILES}
|
||||
PKG=k8s.io/ingress-nginx
|
||||
|
||||
.PHONY: test-e2e
|
||||
test-e2e: ginkgo
|
||||
@go run hack/e2e.go -v --up --test --down
|
||||
ARCH ?= $(shell go env GOARCH)
|
||||
GOARCH = ${ARCH}
|
||||
DUMB_ARCH = ${ARCH}
|
||||
|
||||
.PHONY: cover
|
||||
cover:
|
||||
@go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' ${GO_LIST_FILES} | xargs -L 1 sh -c
|
||||
gover
|
||||
goveralls -coverprofile=gover.coverprofile -service travis-ci
|
||||
ALL_ARCH = amd64 arm arm64 ppc64le
|
||||
|
||||
.PHONY: vet
|
||||
vet:
|
||||
@go vet ${GO_LIST_FILES}
|
||||
QEMUVERSION=v2.9.1
|
||||
|
||||
IMGNAME = nginx-ingress-controller
|
||||
IMAGE = $(REGISTRY)/$(IMGNAME)
|
||||
MULTI_ARCH_IMG = $(IMAGE)-$(ARCH)
|
||||
|
||||
# Set default base image dynamically for each arch
|
||||
BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.25
|
||||
|
||||
ifeq ($(ARCH),arm)
|
||||
QEMUARCH=arm
|
||||
GOARCH=arm
|
||||
DUMB_ARCH=armhf
|
||||
endif
|
||||
ifeq ($(ARCH),arm64)
|
||||
QEMUARCH=aarch64
|
||||
endif
|
||||
ifeq ($(ARCH),ppc64le)
|
||||
QEMUARCH=ppc64le
|
||||
GOARCH=ppc64le
|
||||
DUMB_ARCH=ppc64el
|
||||
endif
|
||||
#ifeq ($(ARCH),s390x)
|
||||
# QEMUARCH=s390x
|
||||
#endif
|
||||
|
||||
TEMP_DIR := $(shell mktemp -d)
|
||||
|
||||
DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile
|
||||
|
||||
all: all-container
|
||||
|
||||
sub-container-%:
|
||||
$(MAKE) ARCH=$* build container
|
||||
|
||||
sub-push-%:
|
||||
$(MAKE) ARCH=$* push
|
||||
|
||||
all-container: $(addprefix sub-container-,$(ALL_ARCH))
|
||||
|
||||
all-push: $(addprefix sub-push-,$(ALL_ARCH))
|
||||
|
||||
container: .container-$(ARCH)
|
||||
.container-$(ARCH):
|
||||
cp -r ./* $(TEMP_DIR)
|
||||
$(SED_I) 's|BASEIMAGE|$(BASEIMAGE)|g' $(DOCKERFILE)
|
||||
$(SED_I) "s|QEMUARCH|$(QEMUARCH)|g" $(DOCKERFILE)
|
||||
$(SED_I) "s|DUMB_ARCH|$(DUMB_ARCH)|g" $(DOCKERFILE)
|
||||
|
||||
ifeq ($(ARCH),amd64)
|
||||
# When building "normally" for amd64, remove the whole line, it has no part in the amd64 image
|
||||
$(SED_I) "/CROSS_BUILD_/d" $(DOCKERFILE)
|
||||
else
|
||||
# When cross-building, only the placeholder "CROSS_BUILD_" should be removed
|
||||
# Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel
|
||||
$(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset
|
||||
curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs
|
||||
$(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE)
|
||||
endif
|
||||
|
||||
$(DOCKER) build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)/rootfs
|
||||
|
||||
ifeq ($(ARCH), amd64)
|
||||
# This is for to maintain the backward compatibility
|
||||
$(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG)
|
||||
endif
|
||||
|
||||
push: .push-$(ARCH)
|
||||
.push-$(ARCH):
|
||||
$(DOCKER) push $(MULTI_ARCH_IMG):$(TAG)
|
||||
ifeq ($(ARCH), amd64)
|
||||
$(DOCKER) push $(IMAGE):$(TAG)
|
||||
endif
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
make -C controllers/nginx clean
|
||||
$(DOCKER) rmi -f $(MULTI_ARCH_IMG):$(TAG) || true
|
||||
|
||||
.PHONY: controllers
|
||||
controllers:
|
||||
make -C controllers/nginx build
|
||||
build: clean
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -a -installsuffix cgo \
|
||||
-ldflags "-s -w -X ${PKG}/version.RELEASE=${TAG} -X ${PKG}/version.COMMIT=${COMMIT} -X ${PKG}/version.REPO=${REPO_INFO}" \
|
||||
-o ${TEMP_DIR}/rootfs/nginx-ingress-controller ${PKG}/cmd/nginx
|
||||
|
||||
.PHONY: docker-build
|
||||
docker-build:
|
||||
make -C controllers/nginx all-container
|
||||
fmt:
|
||||
@echo "+ $@"
|
||||
@go list -f '{{if len .TestGoFiles}}"gofmt -s -l {{.Dir}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push:
|
||||
make -C controllers/nginx all-push
|
||||
lint:
|
||||
@echo "+ $@"
|
||||
@go list -f '{{if len .TestGoFiles}}"golint {{.Dir}}/..."{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
|
||||
|
||||
.PHONE: release
|
||||
release:
|
||||
make -C controllers/nginx release
|
||||
test: fmt lint vet
|
||||
@echo "+ $@"
|
||||
@go test -v -race -tags "$(BUILDTAGS) cgo" $(shell go list ${PKG}/... | grep -v vendor)
|
||||
|
||||
.PHONY: ginkgo
|
||||
ginkgo:
|
||||
go get github.com/onsi/ginkgo/ginkgo
|
||||
cover:
|
||||
@echo "+ $@"
|
||||
@go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
|
||||
gover
|
||||
goveralls -coverprofile=gover.coverprofile -service travis-ci -repotoken ${COVERALLS_TOKEN}
|
||||
|
||||
vet:
|
||||
@echo "+ $@"
|
||||
@go vet $(shell go list ${PKG}/... | grep -v vendor)
|
||||
|
||||
release: all-container all-push
|
||||
echo "done"
|
||||
|
||||
.PHONY: docker-build
|
||||
docker-build: all-container
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push: all-push
|
||||
|
|
553
README.md
@@ -1,4 +1,4 @@
-# NGINX Ingress
+# NGINX Ingress Controller

[](https://travis-ci.org/kubernetes/ingress-nginx)
[](https://coveralls.io/github/kubernetes/ingress-nginx?branch=master)
@@ -16,8 +16,6 @@ The GCE ingress controller was moved to [github.com/kubernetes/ingress-gce](http
* Learn more about using Ingress
  * See our user documentation on [k8s.io](http://kubernetes.io/docs/user-guide/ingress/)
  * Follow through to the respective platform specific [examples](examples/README.md)
* Write your own Ingress controller
  * See our [developer documentation](docs/dev/README.md)
* Deploy existing Ingress controllers
  * See our [admin documentation](docs/admin.md)
* Contribute
@@ -25,3 +23,552 @@ The GCE ingress controller was moved to [github.com/kubernetes/ingress-gce](http
* Debug
  * Peruse the [FAQ section](docs/faq/README.md)
  * Ask on one of the [user-support channels](CONTRIBUTING.md#support-channels)

### What is an Ingress Controller?

Configuring a webserver or loadbalancer is harder than it should be. Most webserver configuration files are very similar. There are some applications that have weird little quirks that tend to throw a wrench in things, but for the most part you can apply the same logic to them and achieve a desired result.

The Ingress resource embodies this idea, and an Ingress controller is meant to handle all the quirks associated with a specific "class" of Ingress (be it a single instance of a loadbalancer, or a more complicated setup of frontends that provide GSLB, DDoS protection, etc).

An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the apiserver's `/ingresses` endpoint for updates to the [Ingress resource](https://kubernetes.io/docs/concepts/services-networking/ingress/). Its job is to satisfy requests for Ingresses.

### Introduction

This is an nginx Ingress controller that uses [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/#understanding-configmaps) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works.
## Contents

* [Conventions](#conventions)
* [Requirements](#requirements)
* [Command line arguments](#command-line-arguments)
* [Dry running](#try-running-the-ingress-controller)
* [Deployment](#deployment)
* [HTTP](#http)
* [HTTPS](#https)
* [Default SSL Certificate](#default-ssl-certificate)
* [HTTPS enforcement](#server-side-https-enforcement)
* [HSTS](#http-strict-transport-security)
* [Kube-Lego](#automated-certificate-management-with-kube-lego)
* [Source IP address](#source-ip-address)
* [TCP Services](#exposing-tcp-services)
* [UDP Services](#exposing-udp-services)
* [Proxy Protocol](#proxy-protocol)
* [Opentracing](#opentracing)
* [NGINX customization](configuration.md)
* [Custom errors](#custom-errors)
* [NGINX status page](#nginx-status-page)
* [Running multiple ingress controllers](#running-multiple-ingress-controllers)
* [Running on Cloudproviders](#running-on-cloudproviders)
* [Disabling NGINX ingress controller](#disabling-nginx-ingress-controller)
* [Log format](#log-format)
* [Local cluster](#local-cluster)
* [Debug & Troubleshooting](#debug--troubleshooting)
* [Limitations](#limitations)
* [Why endpoints and not services?](#why-endpoints-and-not-services)
* [NGINX Notes](#nginx-notes)
## Conventions

Anytime we reference a TLS secret, we mean one containing a PEM-encoded x509 certificate and key (RSA 2048, etc.). You can generate such a certificate with:
`openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}"`
and create the secret via `kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}`
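For example, a minimal end-to-end sketch (the host name, file names and secret name below are placeholders):

```
HOST=foo.bar.com
KEY_FILE=tls.key
CERT_FILE=tls.crt
CERT_NAME=tls-secret

# Generate a self-signed certificate and key for ${HOST}
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout ${KEY_FILE} -out ${CERT_FILE} \
  -subj "/CN=${HOST}/O=${HOST}"

# Store them in a Kubernetes TLS secret
kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}
```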
## Requirements

- Default backend [404-server](https://github.com/kubernetes/ingress/tree/master/images/404-server)
## Command line arguments
|
||||
|
||||
```
|
||||
Usage of :
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--apiserver-host string The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted.
|
||||
--configmap string Name of the ConfigMap that contains the custom configuration to use
|
||||
--default-backend-service string Service used to serve a 404 page for the default backend. Takes the form
|
||||
namespace/name. The controller uses the first node port of this Service for
|
||||
the default backend.
|
||||
--default-server-port int Default port to use for exposing the default server (catch all) (default 8181)
|
||||
--default-ssl-certificate string Name of the secret
|
||||
that contains a SSL certificate to be used as default for a HTTPS catch-all server
|
||||
--disable-node-list Disable querying nodes. If --force-namespace-isolation is true, this should also be set.
|
||||
--election-id string Election id to use for status update. (default "ingress-controller-leader")
|
||||
--enable-ssl-passthrough Enable SSL passthrough feature. Default is disabled
|
||||
--force-namespace-isolation Force namespace isolation. This flag is required to avoid the reference of secrets or
|
||||
configmaps located in a different namespace than the specified in the flag --watch-namespace.
|
||||
--health-check-path string Defines
|
||||
the URL to be used as health check inside in the default server in NGINX. (default "/healthz")
|
||||
--healthz-port int port for healthz endpoint. (default 10254)
|
||||
--http-port int Indicates the port to use for HTTP traffic (default 80)
|
||||
--https-port int Indicates the port to use for HTTPS traffic (default 443)
|
||||
--ingress-class string Name of the ingress class to route through this controller.
|
||||
--kubeconfig string Path to kubeconfig file with authorization and master location information.
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--profiling Enable profiling via web interface host:port/debug/pprof/ (default true)
|
||||
--publish-service string Service fronting the ingress controllers. Takes the form
|
||||
namespace/name. The controller will set the endpoint records on the
|
||||
ingress objects to reflect those on the service.
|
||||
--sort-backends Defines if backends and it's endpoints should be sorted
|
||||
--ssl-passtrough-proxy-port int Default port to use internally for SSL when SSL Passthgough is enabled (default 442)
|
||||
--status-port int Indicates the TCP port to use for exposing the nginx status page (default 18080)
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
--sync-period duration Relist and confirm cloud resources this often. Default is 10 minutes (default 10m0s)
|
||||
--tcp-services-configmap string Name of the ConfigMap that contains the definition of the TCP services to expose.
|
||||
The key in the map indicates the external port to be used. The value is the name of the
|
||||
service with the format namespace/serviceName and the port of the service could be a
|
||||
number of the name of the port.
|
||||
The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend
|
||||
--udp-services-configmap string Name of the ConfigMap that contains the definition of the UDP services to expose.
|
||||
The key in the map indicates the external port to be used. The value is the name of the
|
||||
service with the format namespace/serviceName and the port of the service could be a
|
||||
number of the name of the port.
|
||||
--update-status Indicates if the
|
||||
ingress controller should update the Ingress status IP/hostname. Default is true (default true)
|
||||
--update-status-on-shutdown Indicates if the
|
||||
ingress controller should update the Ingress status IP/hostname when the controller
|
||||
is being stopped. Default is true (default true)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
--watch-namespace string Namespace to watch for Ingress. Default is to watch all namespaces
|
||||
```
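These flags are passed as arguments to the controller container. As a hypothetical illustration only (this snippet is not part of this commit; it assumes the `POD_NAMESPACE` environment variable is injected via the downward API and reuses names mentioned elsewhere in this document):

```
containers:
- name: nginx-ingress-controller
  image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
  args:
  - /nginx-ingress-controller
  - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
  - --configmap=$(POD_NAMESPACE)/nginx-custom-configuration
```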
|
||||
|
||||
## Deployment

First create a default backend and its corresponding service:
```
$ kubectl create -f examples/default-backend.yaml
```

Follow the [example-deployment](../../examples/deployment/nginx/README.md) steps to deploy nginx-ingress-controller in a Kubernetes cluster (you may prefer other types of workloads, such as a DaemonSet, in a production environment).
Loadbalancers are created via a ReplicationController or DaemonSet:
## HTTP

First we need to deploy an application to publish. To keep this simple we will use the [echoheaders app](https://github.com/kubernetes/contrib/blob/master/ingress/echoheaders/echo-app.yaml) that just returns information about the HTTP request as output:
```
kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.8 --replicas=1 --port=8080
```

Now we expose the same application through two different services (so we can create different Ingress rules):
```
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y
```

Next we create a couple of Ingress rules:
```
kubectl create -f examples/ingress.yaml
```
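For reference, the rules created above correspond roughly to an Ingress like the following sketch. This is reconstructed from the `kubectl get ing` output shown below, not copied from `examples/ingress.yaml`:

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: echomap
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /foo
        backend:
          serviceName: echoheaders-x
          servicePort: 80
  - host: bar.baz.com
    http:
      paths:
      - path: /bar
        backend:
          serviceName: echoheaders-y
          servicePort: 80
      - path: /foo
        backend:
          serviceName: echoheaders-x
          servicePort: 80
```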

We check that the Ingress rules are defined:
```
$ kubectl get ing
NAME      RULE          BACKEND            ADDRESS
echomap   -
          foo.bar.com
          /foo          echoheaders-x:80
          bar.baz.com
          /bar          echoheaders-y:80
          /foo          echoheaders-x:80
```

Before deploying the Ingress controller we need a default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server):
```
kubectl create -f examples/default-backend.yaml
kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend
```

Check that NGINX is running with the defined Ingress rules:

```
$ LBIP=$(kubectl get node `kubectl get po -l name=nginx-ingress-lb --template '{{range .items}}{{.spec.nodeName}}{{end}}'` --template '{{range $i, $n := .status.addresses}}{{if eq $n.type "ExternalIP"}}{{$n.address}}{{end}}{{end}}')
$ curl $LBIP/foo -H 'Host: foo.bar.com'
```
## HTTPS
|
||||
|
||||
You can secure an Ingress by specifying a secret that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. This controller supports SNI. The TLS secret must contain keys named tls.crt and tls.key that contain the certificate and private key to use for TLS, e.g.:
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
data:
|
||||
tls.crt: base64 encoded cert
|
||||
tls.key: base64 encoded key
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: foo-secret
|
||||
namespace: default
|
||||
type: kubernetes.io/tls
|
||||
```
|
||||
|
||||
Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS:
|
||||
|
||||
```
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: no-rules-map
|
||||
spec:
|
||||
tls:
|
||||
secretName: foo-secret
|
||||
backend:
|
||||
serviceName: s1
|
||||
servicePort: 80
|
||||
```
|
||||
Please follow [PREREQUISITES](../../examples/PREREQUISITES.md) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different than the name of the certificate.
|
||||
|
||||
Check the [example](../../examples/tls-termination/nginx)
|
||||
|
||||
### Default SSL Certificate
|
||||
|
||||
NGINX provides a catch-all [server name](http://nginx.org/en/docs/http/server_names.html) for requests that do not match any of the configured server names. This configuration works without issues for HTTP traffic. In the case of HTTPS, NGINX requires a certificate. For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned case.
If this flag is not provided NGINX will use a self-signed certificate.
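A minimal sketch of wiring this up, reusing the `default/foo-tls` secret name used later in this section (the key and certificate files are placeholders):

```
# Create the secret that will back the HTTPS catch-all server
kubectl create secret tls foo-tls --key tls.key --cert tls.crt -n default

# Then start the controller with:
#   --default-ssl-certificate=default/foo-tls
```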
|
||||
|
||||
Running without the flag `--default-ssl-certificate`:
|
||||
|
||||
```
|
||||
$ curl -v https://10.2.78.7:443 -k
|
||||
* Rebuilt URL to: https://10.2.78.7:443/
|
||||
* Trying 10.2.78.4...
|
||||
* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
|
||||
* ALPN, offering http/1.1
|
||||
* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
|
||||
* successfully set certificate verify locations:
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
CApath: /etc/ssl/certs
|
||||
* TLSv1.2 (OUT), TLS header, Certificate Status (22):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.2 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
|
||||
* TLSv1.2 (IN), TLS handshake, Server finished (14):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
|
||||
* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (OUT), TLS handshake, Finished (20):
|
||||
* TLSv1.2 (IN), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
|
||||
* ALPN, server accepted to use http/1.1
|
||||
* Server certificate:
|
||||
* subject: CN=foo.bar.com
|
||||
* start date: Apr 13 00:50:56 2016 GMT
|
||||
* expire date: Apr 13 00:50:56 2017 GMT
|
||||
* issuer: CN=foo.bar.com
|
||||
* SSL certificate verify result: self signed certificate (18), continuing anyway.
|
||||
> GET / HTTP/1.1
|
||||
> Host: 10.2.78.7
|
||||
> User-Agent: curl/7.47.1
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 404 Not Found
|
||||
< Server: nginx/1.11.1
|
||||
< Date: Thu, 21 Jul 2016 15:38:46 GMT
|
||||
< Content-Type: text/html
|
||||
< Transfer-Encoding: chunked
|
||||
< Connection: keep-alive
|
||||
< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
|
||||
<
|
||||
<span>The page you're looking for could not be found.</span>
|
||||
|
||||
* Connection #0 to host 10.2.78.7 left intact
|
||||
```
|
||||
|
||||
Specifying `--default-ssl-certificate=default/foo-tls`:
|
||||
|
||||
```
|
||||
core@localhost ~ $ curl -v https://10.2.78.7:443 -k
|
||||
* Rebuilt URL to: https://10.2.78.7:443/
|
||||
* Trying 10.2.78.7...
|
||||
* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
|
||||
* ALPN, offering http/1.1
|
||||
* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
|
||||
* successfully set certificate verify locations:
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
CApath: /etc/ssl/certs
|
||||
* TLSv1.2 (OUT), TLS header, Certificate Status (22):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.2 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
|
||||
* TLSv1.2 (IN), TLS handshake, Server finished (14):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
|
||||
* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (OUT), TLS handshake, Finished (20):
|
||||
* TLSv1.2 (IN), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
|
||||
* ALPN, server accepted to use http/1.1
|
||||
* Server certificate:
|
||||
* subject: CN=foo.bar.com
|
||||
* start date: Apr 13 00:50:56 2016 GMT
|
||||
* expire date: Apr 13 00:50:56 2017 GMT
|
||||
* issuer: CN=foo.bar.com
|
||||
* SSL certificate verify result: self signed certificate (18), continuing anyway.
|
||||
> GET / HTTP/1.1
|
||||
> Host: 10.2.78.7
|
||||
> User-Agent: curl/7.47.1
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 404 Not Found
|
||||
< Server: nginx/1.11.1
|
||||
< Date: Mon, 18 Jul 2016 21:02:59 GMT
|
||||
< Content-Type: text/html
|
||||
< Transfer-Encoding: chunked
|
||||
< Connection: keep-alive
|
||||
< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
|
||||
<
|
||||
<span>The page you're looking for could not be found.</span>
|
||||
|
||||
* Connection #0 to host 10.2.78.7 left intact
|
||||
```
|
||||
|
||||
|
||||
### Server-side HTTPS enforcement
|
||||
|
||||
By default the controller redirects (301) to HTTPS if TLS is enabled for that Ingress. If you want to disable that behaviour globally, you can use `ssl-redirect: "false"` in the NGINX config map.
|
||||
|
||||
To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource.
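For example, a sketch of an Ingress that opts out of the redirect (the resource name is a placeholder; the backend reuses the `s1` service from the HTTPS example above):

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: no-ssl-redirect
  annotations:
    ingress.kubernetes.io/ssl-redirect: "false"
spec:
  backend:
    serviceName: s1
    servicePort: 80
```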
|
||||
|
||||
|
||||
### HTTP Strict Transport Security
|
||||
|
||||
HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header, that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
|
||||
|
||||
By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule.
|
||||
|
||||
To disable this behavior use `hsts=false` in the NGINX config map.
|
||||
|
||||
|
||||
### Automated Certificate Management with Kube-Lego
|
||||
|
||||
[Kube-Lego] automatically requests missing or expired certificates from
|
||||
[Let's Encrypt] by monitoring ingress resources and their referenced secrets. To
|
||||
enable this for an ingress resource you have to add an annotation:
|
||||
|
||||
```
|
||||
kubectl annotate ing ingress-demo kubernetes.io/tls-acme="true"
|
||||
```
|
||||
|
||||
To set up Kube-Lego you can take a look at this [full example]. The first
version to fully support Kube-Lego is nginx Ingress controller 0.8.
|
||||
|
||||
[full example]:https://github.com/jetstack/kube-lego/tree/master/examples
|
||||
[Kube-Lego]:https://github.com/jetstack/kube-lego
|
||||
[Let's Encrypt]:https://letsencrypt.org
|
||||
|
||||
## Source IP address
|
||||
|
||||
By default NGINX uses the content of the header `X-Forwarded-For` as the source of truth to get information about the client IP address. This works without issues in L7 **if we configure the setting `proxy-real-ip-cidr`** with the correct information of the IP/network address of the external load balancer.
If the ingress controller is running in AWS we need to use the VPC IPv4 CIDR. This allows NGINX to avoid spoofing of the header.
Another option is to enable proxy protocol using `use-proxy-protocol: "true"`.
In this mode NGINX does not use the content of the header to get the source IP address of the connection.
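For example, a sketch of the relevant ConfigMap entry (the CIDR below is a placeholder; use the real IP/network range of your external load balancer or VPC):

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-custom-configuration
data:
  proxy-real-ip-cidr: "10.0.0.0/16"
  # or, when the L4 load balancer speaks proxy protocol:
  # use-proxy-protocol: "true"
```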
|
||||
|
||||
## Exposing TCP services
|
||||
|
||||
Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>:[PROXY]:[PROXY]`.
It is possible to use a number or the name of the port. The last two fields are optional. Adding `PROXY` in either or both of the last two fields enables Proxy Protocol decoding (listen) and/or encoding (proxy_pass) for a TCP service (https://www.nginx.com/resources/admin-guide/proxy-protocol/).

The next example shows how to expose the service `example-go` running in the namespace `default` on port `8080` using external port `9000`:
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: tcp-configmap-example
|
||||
data:
|
||||
9000: "default/example-go:8080"
|
||||
```
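A variant of the same entry with Proxy Protocol enabled for both decoding (listen) and encoding (proxy_pass), as described above (a sketch using the same placeholder service):

```
data:
  9000: "default/example-go:8080:PROXY:PROXY"
```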
|
||||
|
||||
|
||||
Please check the [tcp services](../../examples/tcp/nginx/README.md) example
|
||||
|
||||
## Exposing UDP services
|
||||
|
||||
Since 1.9.13 NGINX provides [UDP Load Balancing](https://www.nginx.com/blog/announcing-udp-load-balancing/).
|
||||
|
||||
Ingress does not support UDP services (yet). For this reason this Ingress controller uses the flag `--udp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>`
|
||||
It is possible to use a number or the name of the port.
|
||||
|
||||
The next example shows how to expose the service `kube-dns` running in the namespace `kube-system` on port `53` using external port `53`:
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: udp-configmap-example
|
||||
data:
|
||||
53: "kube-system/kube-dns:53"
|
||||
```
|
||||
|
||||
|
||||
Please check the [udp services](../../examples/udp/nginx/README.md) example
|
||||
|
||||
## Proxy Protocol
|
||||
|
||||
If you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP address. To prevent this you could use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic; this sends the connection details before forwarding the actual TCP connection itself.
|
||||
|
||||
Amongst others [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support Proxy Protocol.
|
||||
|
||||
Please check the [proxy-protocol](examples/proxy-protocol/) example
|
||||
|
||||
### Opentracing
|
||||
|
||||
Using the third party module [rnburn/nginx-opentracing](https://github.com/rnburn/nginx-opentracing) the NGINX ingress controller can configure NGINX to enable [OpenTracing](http://opentracing.io) instrumentation.
|
||||
By default this feature is disabled.
|
||||
|
||||
To enable the instrumentation we just need to enable it in the configuration ConfigMap and set the host where the traces should be sent.

The [aledbf/zipkin-js-example](https://github.com/aledbf/zipkin-js-example) GitHub repository contains a dockerized version of zipkin-js-example with the required Kubernetes descriptors.
To install the example and the zipkin collector we just need to run:
|
||||
|
||||
```
|
||||
$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/zipkin.yaml
|
||||
$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/deployment.yaml
|
||||
```
|
||||
|
||||
We also need to configure the NGINX controller ConfigMap with the required values:
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
data:
|
||||
enable-opentracing: "true"
|
||||
zipkin-collector-host: zipkin.default.svc.cluster.local
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: nginx-ingress-controller
|
||||
name: nginx-custom-configuration
|
||||
```
|
||||
|
||||
Using curl we can generate some traces:
|
||||
```
|
||||
$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example'
|
||||
$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example'
|
||||
```
|
||||
|
||||
In the zipkin interface we can see the details:
|
||||
|
||||

|
||||
|
||||
### Custom errors
|
||||
|
||||
In case of an error in a request the body of the response is obtained from the `default backend`.
|
||||
Each request to the default backend includes two headers:
|
||||
|
||||
- `X-Code` indicates the HTTP code to be returned to the client.
|
||||
- `X-Format` the value of the `Accept` header.
|
||||
|
||||
**Important:** the custom backend must return the correct HTTP status code to the client. NGINX does not change the response from the custom default backend.

Using these two headers it is possible to build a custom backend service like [this one](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-errors/nginx) that inspects each request and returns a custom error page in the format expected by the client. Please check the example [custom-errors](examples/customization/custom-errors/nginx/README.md).

NGINX sends additional headers that can be used to build a custom response:
|
||||
|
||||
- X-Original-URI
|
||||
- X-Namespace
|
||||
- X-Ingress-Name
|
||||
- X-Service-Name
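
As an illustration (hypothetical path; the `X-*` headers shown are what the default backend receives, not what the client sends):

```
# Client request that ends up at the default backend with a 404
curl http://$LBIP/missing -H 'Host: foo.bar.com' -H 'Accept: application/json'

# The default backend receives, among others:
#   X-Code: 404
#   X-Format: application/json
#   X-Original-URI: /missing
```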
|
||||
|
||||
### NGINX status page
|
||||
|
||||
The ngx_http_stub_status_module module provides access to basic status information. This is the default module, active on the URL `/nginx_status`.
This controller provides an alternative to this module using the [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) third-party module.
To use this module just provide a config map with the key `enable-vts-status=true`. The URL is exposed on port 18080.
|
||||
Please check the example `example/rc-default.yaml`
|
||||
|
||||

|
||||
|
||||
To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`
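For example (assuming the status port is reachable on a node IP stored in `$NODE_IP`):

```
curl http://$NODE_IP:18080/nginx_status/format/json
```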

### Running multiple ingress controllers

If you're running multiple ingress controllers, or running on a cloud provider that natively handles ingress, you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses that you would like this controller to claim. Not specifying the annotation will lead to multiple ingress controllers claiming the same ingress. Specifying the wrong value will result in all ingress controllers ignoring the ingress. Running multiple ingress controllers in the same cluster was not supported in Kubernetes versions < 1.3.
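
For illustration, an Ingress claimed by this controller carries the annotation as shown below (host and backend names are placeholders):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /
        backend:
          serviceName: echoheaders
          servicePort: 80
```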

### Running on Cloudproviders

If you're running this ingress controller on a cloud provider, you should assume the provider also has a native Ingress controller and specify the `ingress.class` annotation as indicated in the previous section.
In addition to this, you will need to add a firewall rule for each port this controller is listening on, i.e. :80 and :443.

### Disabling NGINX ingress controller

Setting the annotation `kubernetes.io/ingress.class` to any value other than "nginx" or the empty string will force the NGINX Ingress controller to ignore your Ingress. Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller.
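
For example, to hand an Ingress over to the GCE controller while the NGINX controller is also running, set the class accordingly (this assumes the GCE controller is deployed in the cluster):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example
  annotations:
    kubernetes.io/ingress.class: "gce"
spec:
  backend:
    serviceName: echoheaders
    servicePort: 80
```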

### Log format

The default configuration uses a custom logging format to add additional information about upstreams:

```
log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
    '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" '
    '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status';
```

Sources:
- [upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables)
- [embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables)

Description:
- `$proxy_protocol_addr`: the client address, if PROXY protocol is enabled
- `$remote_addr`: the client address, if PROXY protocol is disabled (default)
- `$proxy_add_x_forwarded_for`: the `X-Forwarded-For` client request header field with the `$remote_addr` variable appended to it, separated by a comma
- `$remote_user`: user name supplied with Basic authentication
- `$time_local`: local time in the Common Log Format
- `$request`: full original request line
- `$status`: response status
- `$body_bytes_sent`: number of bytes sent to a client, not counting the response header
- `$http_referer`: value of the Referer header
- `$http_user_agent`: value of the User-Agent header
- `$request_length`: request length (including request line, header, and request body)
- `$request_time`: time elapsed since the first bytes were read from the client
- `$proxy_upstream_name`: name of the upstream. The format is `upstream-<namespace>-<service name>-<service port>`
- `$upstream_addr`: the IP address and port, or the path to the UNIX-domain socket, of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas
- `$upstream_response_length`: the length of the response obtained from the upstream server
- `$upstream_response_time`: time spent receiving the response from the upstream server; the time is kept in seconds with millisecond resolution
- `$upstream_status`: status code of the response obtained from the upstream server
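
The format can also be overridden through the controller ConfigMap. A minimal sketch, assuming the `log-format-upstream` key is supported by the controller version in use:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-custom-configuration
data:
  log-format-upstream: '$remote_addr - [$time_local] "$request" $status [$proxy_upstream_name]'
```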

### Local cluster

Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) it is possible to start a local Kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md) for more details.

Use of `hostNetwork: true` in the ingress controller is required so that it falls back to localhost:8080 for the apiserver if every other client creation check fails (e.g. service account not present, kubeconfig doesn't exist, no master env vars...).
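
A minimal sketch of that setting in the controller Deployment's pod template (all other fields omitted):

```yaml
spec:
  template:
    spec:
      hostNetwork: true
      containers:
      - name: nginx-ingress-controller
        image: gcr.io/google_containers/nginx-ingress-controller:0.9.0  # tag is illustrative
```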

### Debug & Troubleshooting

Using the flag `--v=XX` it is possible to increase the level of logging. The flag is passed as a container argument, as shown in the sketch after this list.
In particular:
- `--v=2` shows details using `diff` about the changes in the configuration in nginx

```
I0316 12:24:37.581267       1 utils.go:148] NGINX configuration diff a//etc/nginx/nginx.conf b//etc/nginx/nginx.conf
I0316 12:24:37.581356       1 utils.go:149] --- /tmp/922554809  2016-03-16 12:24:37.000000000 +0000
+++ /tmp/079811012  2016-03-16 12:24:37.000000000 +0000
@@ -235,7 +235,6 @@
    upstream default-echoheadersx {
        least_conn;
-       server 10.2.112.124:5000;
        server 10.2.208.50:5000;
    }
I0316 12:24:37.610073       1 command.go:69] change in configuration detected. Reloading...
```

- `--v=3` shows details about the service, Ingress rule and endpoint changes, and dumps the nginx configuration in JSON format
- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html)
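
A sketch of the relevant part of the controller Deployment (other required flags omitted for brevity):

```yaml
containers:
- name: nginx-ingress-controller
  image: gcr.io/google_containers/nginx-ingress-controller:0.9.0  # tag is illustrative
  args:
  - /nginx-ingress-controller
  - --v=3
```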

### Limitations

- Ingress rules for TLS require the definition of the field `host`

### Why endpoints and not services

The NGINX ingress controller does not use [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) and allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT.

### NGINX notes

Since `gcr.io/google_containers/nginx-slim:0.8`, the NGINX image contains the following patches:
- Dynamic TLS record size [nginx__dynamic_tls_records.patch](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/)
  NGINX provides the parameter `ssl_buffer_size` to adjust the size of the buffer. The default value in NGINX is 16KB; the ingress controller changes it to 4KB. This improves the [TLS Time To First Byte (TTTFB)](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/), but the size is fixed. This patch adapts the size of the buffer to the content being served, helping to improve the perceived latency.

@@ -22,12 +22,14 @@ import (
 	"syscall"
 	"time"
 
+	"k8s.io/ingress-nginx/pkg/nginx/controller"
+
 	"github.com/golang/glog"
 )
 
 func main() {
 	// start a new nginx controller
-	ngx := newNGINXController()
+	ngx := controller.NewNGINXController()
 
 	go handleSigterm(ngx)
 	// start the controller
@@ -40,7 +42,7 @@ func main() {
 	}
 }
 
-func handleSigterm(ngx *NGINXController) {
+func handleSigterm(ngx *controller.NGINXController) {
 	signalChan := make(chan os.Signal, 1)
 	signal.Notify(signalChan, syscall.SIGTERM)
 	<-signalChan

5
code-of-conduct.md
Normal file

@@ -0,0 +1,5 @@
## Kubernetes Community Code of Conduct

Kubernetes follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

[]()

@@ -1,11 +0,0 @@
# Ingress controllers

This directory contains Ingress controllers.

Configuring a webserver or loadbalancer is harder than it should be. Most webserver configuration files are very similar. There are some applications that have weird little quirks that tend to throw a wrench in things, but for the most part you can apply the same logic to them and achieve a desired result.

The Ingress resource embodies this idea, and an Ingress controller is meant to handle all the quirks associated with a specific "class" of Ingress (be it a single instance of a loadbalancer, or a more complicated setup of frontends that provide GSLB, DDoS protection, etc).

## What is an Ingress Controller?

An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the apiserver's `/ingresses` endpoint for updates to the [Ingress resource](https://kubernetes.io/docs/concepts/services-networking/ingress/). Its job is to satisfy requests for Ingresses.

1
controllers/gce/.gitignore
vendored

@@ -1 +0,0 @@
glbc
|
@ -1,153 +0,0 @@
|
|||
# GLBC: Beta limitations
|
||||
|
||||
As of the Kubernetes 1.7 release, the GCE L7 Loadbalancer controller is still a *beta* product.
|
||||
|
||||
This is a list of beta limitations:
|
||||
|
||||
* [IPs](#static-and-ephemeral-ips): Creating a simple HTTP Ingress will allocate an ephemeral IP. Creating an Ingress with a TLS section will allocate a static IP.
|
||||
* [Latency](#latency): GLBC is not built for performance. Creating many Ingresses at a time can overwhelm it. It won't fall over, but will take its own time to churn through the Ingress queue.
|
||||
* [Quota](#quota): By default, GCE projects are granted a quota of 3 Backend Services. This is insufficient for most Kubernetes clusters.
|
||||
* [Oauth scopes](https://cloud.google.com/compute/docs/authentication): By default GKE/GCE clusters are granted "compute/rw" permissions. If you setup a cluster without these permissions, GLBC is useless and you should delete the controller as described in the [section below](#disabling-glbc). If you don't delete the controller it will keep restarting.
|
||||
* [Default backends](https://cloud.google.com/compute/docs/load-balancing/http/url-map#url_map_simplest_case): All L7 Loadbalancers created by GLBC have a default backend. If you don't specify one in your Ingress, GLBC will assign the 404 default backend mentioned above.
|
||||
* [Load Balancing Algorithms](#load-balancing-algorithms): The ingress controller doesn't support fine grained control over loadbalancing algorithms yet.
|
||||
* [Large clusters](#large-clusters): Ingress on GCE isn't supported on large (>1000 nodes), single-zone clusters.
|
||||
* [Teardown](README.md#deletion): The recommended way to tear down a cluster with active Ingresses is to either delete each Ingress, or hit the `/delete-all-and-quit` endpoint on GLBC, before invoking a cluster teardown script (eg: kube-down.sh). You will have to manually cleanup GCE resources through the [cloud console](https://cloud.google.com/compute/docs/console#access) or [gcloud CLI](https://cloud.google.com/compute/docs/gcloud-compute/) if you simply tear down the cluster with active Ingresses.
|
||||
* [Changing UIDs](#changing-the-cluster-uid): You can change the UID used as a suffix for all your GCE cloud resources, but this requires you to delete existing Ingresses first.
|
||||
* [Cleaning up](#cleaning-up-cloud-resources): You can delete loadbalancers that older clusters might've leaked due to premature teardown through the GCE console.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you can receive traffic through the GCE L7 Loadbalancer Controller you need:
|
||||
* A Working Kubernetes cluster >= 1.1
|
||||
* At least 1 Kubernetes NodePort Service (this is the endpoint for your Ingress)
|
||||
* A single instance of the L7 Loadbalancer Controller pod, if you're running Kubernetes < 1.3 (the GCP ingress controller runs on the master in later versions)
|
||||
|
||||
## Quota
|
||||
|
||||
GLBC is not aware of your GCE quota. As of this writing users get 5 [GCE Backend Services](https://cloud.google.com/compute/docs/load-balancing/http/backend-service) by default. If you plan on creating Ingresses for multiple Kubernetes Services, remember that each one requires a backend service, and request quota. Should you fail to do so the controller will poll periodically and grab the first free backend service slot it finds. You can view your quota:
|
||||
|
||||
```console
|
||||
$ gcloud compute project-info describe --project myproject
|
||||
```
|
||||
See [GCE documentation](https://cloud.google.com/compute/docs/resource-quotas#checking_your_quota) for how to request more.
|
||||
|
||||
## Latency
|
||||
|
||||
It takes ~1m to spin up a loadbalancer (this includes acquiring the public ip), and ~5-6m before the GCE api starts healthchecking backends. So as far as latency goes, here's what to expect:
|
||||
|
||||
Assume one creates the following simple Ingress:
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: test-ingress
|
||||
spec:
|
||||
backend:
|
||||
# This will just loopback to the default backend of GLBC
|
||||
serviceName: default-http-backend
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
* time, t=0
|
||||
```console
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
test-ingress - default-http-backend:80
|
||||
$ kubectl describe ing
|
||||
No events.
|
||||
```
|
||||
|
||||
* time, t=1m
|
||||
```console
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
test-ingress - default-http-backend:80 130.211.5.27
|
||||
|
||||
$ kubectl describe ing
|
||||
target-proxy: k8s-tp-default-test-ingress
|
||||
url-map: k8s-um-default-test-ingress
|
||||
backends: {"k8s-be-32342":"UNKNOWN"}
|
||||
forwarding-rule: k8s-fw-default-test-ingress
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Reason Message
|
||||
───────── ──────── ───── ──── ───────────── ────── ───────
|
||||
46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27
|
||||
```
|
||||
|
||||
* time, t=5m
|
||||
```console
|
||||
$ kubectl describe ing
|
||||
target-proxy: k8s-tp-default-test-ingress
|
||||
url-map: k8s-um-default-test-ingress
|
||||
backends: {"k8s-be-32342":"HEALTHY"}
|
||||
forwarding-rule: k8s-fw-default-test-ingress
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Reason Message
|
||||
───────── ──────── ───── ──── ───────────── ────── ───────
|
||||
46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27
|
||||
|
||||
```
|
||||
|
||||
## Static and Ephemeral IPs
|
||||
|
||||
GCE has a concept of [ephemeral](https://cloud.google.com/compute/docs/instances-and-network#ephemeraladdress) and [static](https://cloud.google.com/compute/docs/instances-and-network#reservedaddress) IPs. A production website would always want a static IP, whereas ephemeral IPs are cheaper (both in terms of quota and cost) and are therefore better suited for experimentation.
|
||||
* Creating a HTTP Ingress (i.e an Ingress without a TLS section) allocates an ephemeral IP, because we don't believe HTTP is the right way to deploy an app.
|
||||
* Creating an Ingress with a TLS section allocates a static IP, because GLBC assumes you mean business.
|
||||
* Modifying an Ingress and adding a TLS section allocates a static IP, but the IP *will* change. This is a beta limitation.
|
||||
* You can [promote](https://cloud.google.com/compute/docs/instances-and-network#promote_ephemeral_ip) an ephemeral to a static IP by hand, if required.
|
||||
|
||||
## Load Balancing Algorithms
|
||||
|
||||
Right now, a kube-proxy nodePort is a necessary condition for Ingress on GCP. This is because the cloud lb doesn't understand how to route directly to your pods. Incorporating kube-proxy and cloud lb algorithms so they cooperate toward a common goal is still a work in progress. If you really want fine grained control over the algorithm, you should deploy the nginx ingress controller.
|
||||
|
||||
## Large clusters
|
||||
|
||||
Ingress is not yet supported on single zone clusters of size > 1000 nodes ([issue](https://github.com/kubernetes/contrib/issues/1724)). If you'd like to use Ingress on a large cluster, spread it across 2 or more zones such that no single zone contains more than a 1000 nodes. This is because there is a [limit](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances) to the number of instances one can add to a single GCE Instance Group. In a multi-zone cluster, each zone gets its own instance group.
|
||||
|
||||
## Disabling GLBC
|
||||
|
||||
To completely stop the Ingress controller on GCE/GKE, please see [this FAQ](/docs/faq/gce.md#how-do-i-disable-the-gce-ingress-controller).
|
||||
|
||||
## Changing the cluster UID
|
||||
|
||||
The Ingress controller uses a UID, stored in a configmap in the `kube-system` namespace, as a suffix for the GCE cloud resources it creates.
|
||||
|
||||
```console
|
||||
$ kubectl --namespace=kube-system get configmaps
|
||||
NAME DATA AGE
|
||||
ingress-uid 1 12d
|
||||
|
||||
$ kubectl --namespace=kube-system get configmaps -o yaml
|
||||
apiVersion: v1
|
||||
items:
|
||||
- apiVersion: v1
|
||||
data:
|
||||
uid: UID
|
||||
kind: ConfigMap
|
||||
...
|
||||
```
|
||||
|
||||
You can pick a different UID, but this requires you to:
|
||||
|
||||
1. Delete existing Ingresses
|
||||
2. Edit the configmap using `kubectl edit`
|
||||
3. Recreate the same Ingress
|
||||
|
||||
After step 3 the Ingress should come up using the new UID as the suffix of all cloud resources. You can't simply change the UID if you have existing Ingresses, because
|
||||
renaming a cloud resource requires a delete/create cycle that the Ingress controller does not currently automate. Note that the UID in step 1 might be an empty string,
|
||||
if you had a working Ingress before upgrading to Kubernetes 1.3.
|
||||
|
||||
__A note on setting the UID__: The Ingress controller uses the token `--` to split a machine generated prefix from the UID itself. If the user supplied UID is found to
|
||||
contain `--` the controller will take the token after the last `--`, and use an empty string if it ends with `--`. For example, if you insert `foo--bar` as the UID,
|
||||
the controller will assume `bar` is the UID. You can either edit the configmap and set the UID to `bar` to match the controller, or delete existing Ingresses as described
|
||||
above, and reset it to a string bereft of `--`.
|
||||
|
||||
## Cleaning up cloud resources
|
||||
|
||||
If you deleted a GKE/GCE cluster without first deleting the associated Ingresses, the controller would not have deleted the associated cloud resources. If you find yourself in such a situation, you can delete the resources by hand:
|
||||
|
||||
1. Navigate to the [cloud console](https://console.cloud.google.com/) and click on the "Networking" tab, then choose "LoadBalancing"
|
||||
2. Find the loadbalancer you'd like to delete, it should have a name formatted as: k8s-um-ns-name--UUID
|
||||
3. Delete it, check the boxes to also cascade the deletion down to associated resources (eg: backend-services)
|
||||
4. Switch to the "Compute Engine" tab, then choose "Instance Groups"
|
||||
5. Delete the Instance Group allocated for the leaked Ingress, it should have a name formatted as: k8s-ig-UUID
|
|
@ -1,20 +0,0 @@
|
|||
# Copyright 2015 The Kubernetes Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM alpine:3.6
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
COPY glbc glbc
|
||||
ENTRYPOINT ["/glbc"]
|
|
@ -1,17 +0,0 @@
|
|||
all: push
|
||||
|
||||
# 0.0 shouldn't clobber any released builds
|
||||
TAG = 0.9.6
|
||||
PREFIX = gcr.io/google_containers/glbc
|
||||
|
||||
server:
|
||||
CGO_ENABLED=0 GOOS=linux godep go build -a -installsuffix cgo -ldflags '-w' -o glbc *.go
|
||||
|
||||
container: server
|
||||
docker build --pull -t $(PREFIX):$(TAG) .
|
||||
|
||||
push: container
|
||||
gcloud docker -- push $(PREFIX):$(TAG)
|
||||
|
||||
clean:
|
||||
rm -f glbc
|
|
@ -1,6 +0,0 @@
|
|||
approvers:
|
||||
- nicksardo
|
||||
- thockin
|
||||
- freehan
|
||||
- csbell
|
||||
- bprashanth
|
|
@ -1,712 +0,0 @@
|
|||
# GLBC
|
||||
|
||||
GLBC is a GCE L7 load balancer controller that manages external loadbalancers configured through the Kubernetes Ingress API.
|
||||
|
||||
## A word to the wise
|
||||
|
||||
Please read the [beta limitations](BETA_LIMITATIONS.md) doc before using this controller. In summary:
|
||||
|
||||
- This is a **work in progress**.
|
||||
- It relies on a beta Kubernetes resource.
|
||||
- The loadbalancer controller pod is not aware of your GCE quota.
|
||||
|
||||
## Overview
|
||||
|
||||
__A reminder on GCE L7__: Google Compute Engine does not have a single resource that represents a L7 loadbalancer. When a user request comes in, it is first handled by the global forwarding rule, which sends the traffic to an HTTP proxy service that sends the traffic to a URL map that parses the URL to see which backend service will handle the request. Each backend service is assigned a set of virtual machine instances grouped into instance groups.
|
||||
|
||||
__A reminder on Services__: A Kubernetes Service defines a set of pods and a means by which to access them, such as single stable IP address and corresponding DNS name. This IP defaults to a cluster VIP in a private address range. You can direct ingress traffic to a particular Service by setting its `Type` to NodePort or LoadBalancer. NodePort opens up a port on *every* node in your cluster and proxies traffic to the endpoints of your service, while LoadBalancer allocates an L4 cloud loadbalancer.
|
||||
|
||||
### L7 Load balancing on Kubernetes
|
||||
|
||||
To achieve L7 loadbalancing through Kubernetes, we employ a resource called `Ingress`. The Ingress is consumed by this loadbalancer controller, which creates the following GCE resource graph:
|
||||
|
||||
[Global Forwarding Rule](https://cloud.google.com/compute/docs/load-balancing/http/global-forwarding-rules) -> [TargetHttpProxy](https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) -> [Url Map](https://cloud.google.com/compute/docs/load-balancing/http/url-map) -> [Backend Service](https://cloud.google.com/compute/docs/load-balancing/http/backend-service) -> [Instance Group](https://cloud.google.com/compute/docs/instance-groups/)
|
||||
|
||||
The controller (glbc) manages the lifecycle of each component in the graph. It uses the Kubernetes resources as a spec for the desired state, and the GCE cloud resources as the observed state, and drives the observed to the desired. If an edge is disconnected, it fixes it. Each Ingress translates to a new GCE L7, and the rules on the Ingress become paths in the GCE Url Map. This allows you to route traffic to various backend Kubernetes Services through a single public IP, which is in contrast to `Type=LoadBalancer`, which allocates a public IP *per* Kubernetes Service. For this to work, the Kubernetes Service *must* have Type=NodePort.
|
||||
|
||||
### The Ingress
|
||||
|
||||
An Ingress in Kubernetes is a REST object, similar to a Service. A minimal Ingress might look like:
|
||||
|
||||
```yaml
|
||||
01. apiVersion: extensions/v1beta1
|
||||
02. kind: Ingress
|
||||
03. metadata:
|
||||
04. name: hostlessendpoint
|
||||
05. spec:
|
||||
06. rules:
|
||||
07. - http:
|
||||
08. paths:
|
||||
09. - path: /hostless
|
||||
10. backend:
|
||||
11. serviceName: test
|
||||
12. servicePort: 80
|
||||
```
|
||||
|
||||
POSTing this to the Kubernetes API server would result in glbc creating a GCE L7 that routes all traffic sent to `http://ip-of-loadbalancer/hostless` to :80 of the service named `test`. If the service doesn't exist yet, or doesn't have a nodePort, glbc will allocate an IP and wait till it does. Once the Service shows up, it will create the required path rules to route traffic to it.
|
||||
|
||||
__Lines 1-4__: Resource metadata used to tag GCE resources. For example, if you go to the console you would see a url map called: k8-fw-default-hostlessendpoint, where default is the namespace and hostlessendpoint is the name of the resource. The Kubernetes API server ensures that namespace/name is unique so there will never be any collisions.
|
||||
|
||||
__Lines 5-7__: Ingress Spec has all the information needed to configure a GCE L7. Most importantly, it contains a list of `rules`. A rule can take many forms, but the only rule relevant to glbc is the `http` rule.
|
||||
|
||||
__Lines 8-9__: Each http rule contains the following information: A host (eg: foo.bar.com, defaults to `*` in this example), a list of paths (eg: `/hostless`) each of which has an associated backend (`test:80`). Both the `host` and `path` must match the content of an incoming request before the L7 directs traffic to the `backend`.
|
||||
|
||||
__Lines 10-12__: A `backend` is a service:port combination. It selects a group of pods capable of servicing traffic sent to the path specified in the parent rule. The `port` is the desired `spec.ports[*].port` from the Service Spec -- Note, though, that the L7 actually directs traffic to the corresponding `NodePort`.
|
||||
|
||||
__Global Parameters__: For the sake of simplicity the example Ingress has no global parameters. However, one can specify a default backend (see examples below) in the absence of which requests that don't match a path in the spec are sent to the default backend of glbc.
|
||||
|
||||
|
||||
## Load Balancer Management
|
||||
|
||||
You can manage a GCE L7 by creating/updating/deleting the associated Kubernetes Ingress.
|
||||
|
||||
### Creation
|
||||
|
||||
Before you can start creating Ingress you need to start up glbc. We can use the rc.yaml in this directory:
|
||||
```shell
|
||||
$ kubectl create -f rc.yaml
|
||||
replicationcontroller "glbc" created
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
glbc-6m6b6 2/2 Running 0 21s
|
||||
|
||||
```
|
||||
|
||||
A couple of things to note about this controller:
|
||||
* It needs a service with a node port to use as the default backend. This is the backend that's used when an Ingress does not specify the default.
|
||||
* It has an intentionally long terminationGracePeriod, this is only required with the --delete-all-on-quit flag (see [Deletion](#deletion))
|
||||
* Don't start 2 instances of the controller in a single cluster, they will fight each other.
|
||||
|
||||
The loadbalancer controller will watch for Services, Nodes and Ingress. Nodes already exist (the nodes in your cluster). We need to create the other 2. You can do so using the ingress-app.yaml in this directory.
|
||||
|
||||
A couple of things to note about the Ingress:
|
||||
* It creates a Replication Controller for a simple echoserver application, with 1 replica.
|
||||
* It creates 3 services for the same application pod: echoheaders[x, y, default]
|
||||
* It creates an Ingress with 2 hostnames and 3 endpoints (foo.bar.com{/foo} and bar.baz.com{/foo, /bar}) that access the given service
|
||||
|
||||
```shell
|
||||
$ kubectl create -f ingress-app.yaml
|
||||
$ kubectl get svc
|
||||
NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
|
||||
echoheadersdefault 10.0.43.119 nodes 80/TCP app=echoheaders 16m
|
||||
echoheadersx 10.0.126.10 nodes 80/TCP app=echoheaders 16m
|
||||
echoheadersy 10.0.134.238 nodes 80/TCP app=echoheaders 16m
|
||||
Kubernetes 10.0.0.1 <none> 443/TCP <none> 21h
|
||||
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
echomap - echoheadersdefault:80
|
||||
foo.bar.com
|
||||
/foo echoheadersx:80
|
||||
bar.baz.com
|
||||
/bar echoheadersy:80
|
||||
/foo echoheadersx:80
|
||||
```
|
||||
|
||||
You can tail the logs of the controller to observe its progress:
|
||||
```
|
||||
$ kubectl logs --follow glbc-6m6b6 l7-lb-controller
|
||||
I1005 22:11:26.731845 1 instances.go:48] Creating instance group k8-ig-foo
|
||||
I1005 22:11:34.360689 1 controller.go:152] Created new loadbalancer controller
|
||||
I1005 22:11:34.360737 1 controller.go:172] Starting loadbalancer controller
|
||||
I1005 22:11:34.380757 1 controller.go:206] Syncing default/echomap
|
||||
I1005 22:11:34.380763 1 loadbalancer.go:134] Syncing loadbalancers [default/echomap]
|
||||
I1005 22:11:34.380810 1 loadbalancer.go:100] Creating l7 default-echomap
|
||||
I1005 22:11:34.385161 1 utils.go:83] Syncing e2e-test-beeps-minion-ugv1
|
||||
...
|
||||
```
|
||||
|
||||
When it's done, it will update the status of the Ingress with the ip of the L7 it created:
|
||||
```shell
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
echomap - echoheadersdefault:80 107.178.254.239
|
||||
foo.bar.com
|
||||
/foo echoheadersx:80
|
||||
bar.baz.com
|
||||
/bar echoheadersy:80
|
||||
/foo echoheadersx:80
|
||||
```
|
||||
|
||||
Go to your GCE console and confirm that the following resources have been created through the HTTPLoadbalancing panel:
|
||||
* A Global Forwarding Rule
|
||||
* An UrlMap
|
||||
* A TargetHTTPProxy
|
||||
* BackendServices (one for each Kubernetes nodePort service)
|
||||
* An Instance Group (with ports corresponding to the BackendServices)
|
||||
|
||||
The HTTPLoadBalancing panel will also show you if your backends have responded to the health checks, wait till they do. This can take a few minutes. If you see `Health status will display here once configuration is complete.` the L7 is still bootstrapping. Wait till you have `Healthy instances: X`. Even though the GCE L7 is driven by our controller, which notices the Kubernetes healthchecks of a pod, we still need to wait on the first GCE L7 health check to complete. Once your backends are up and healthy:
|
||||
|
||||
```shell
|
||||
$ curl --resolve foo.bar.com:80:107.178.245.239 http://foo.bar.com/foo
|
||||
CLIENT VALUES:
|
||||
client_address=('10.240.29.196', 56401) (10.240.29.196)
|
||||
command=GET
|
||||
path=/echoheadersx
|
||||
real path=/echoheadersx
|
||||
query=
|
||||
request_version=HTTP/1.1
|
||||
|
||||
SERVER VALUES:
|
||||
server_version=BaseHTTP/0.6
|
||||
sys_version=Python/3.4.3
|
||||
protocol_version=HTTP/1.0
|
||||
|
||||
HEADERS RECEIVED:
|
||||
Accept=*/*
|
||||
Connection=Keep-Alive
|
||||
Host=107.178.254.239
|
||||
User-Agent=curl/7.35.0
|
||||
Via=1.1 google
|
||||
X-Forwarded-For=216.239.45.73, 107.178.254.239
|
||||
X-Forwarded-Proto=http
|
||||
```
|
||||
|
||||
You can also edit `/etc/hosts` instead of using `--resolve`.
|
||||
|
||||
#### Updates
|
||||
|
||||
Say you don't want a default backend and you'd like to allow all traffic hitting your loadbalancer at /foo to reach your echoheaders backend service, not just the traffic for foo.bar.com. You can modify the Ingress Spec:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /foo
|
||||
..
|
||||
```
|
||||
|
||||
and replace the existing Ingress (ignore errors about replacing the Service, we're using the same .yaml file but we only care about the Ingress):
|
||||
|
||||
```
|
||||
$ kubectl replace -f ingress-app.yaml
|
||||
ingress "echomap" replaced
|
||||
|
||||
$ curl http://107.178.254.239/foo
|
||||
CLIENT VALUES:
|
||||
client_address=('10.240.143.179', 59546) (10.240.143.179)
|
||||
command=GET
|
||||
path=/foo
|
||||
real path=/foo
|
||||
...
|
||||
|
||||
$ curl http://107.178.254.239/
|
||||
<pre>
|
||||
INTRODUCTION
|
||||
============
|
||||
This is an nginx webserver for simple loadbalancer testing. It works well
|
||||
for me but it might not have some of the features you want. If you would
|
||||
...
|
||||
```
|
||||
|
||||
A couple of things to note about this particular update:
|
||||
* An Ingress without a default backend inherits the backend of the Ingress controller.
|
||||
* An IngressRule without a host gets the wildcard. This is controller specific; some loadbalancer controllers do not respect anything but a DNS subdomain as the host. You *cannot* set the host to a regex.
|
||||
* You never want to delete then re-create an Ingress, as it will result in the controller tearing down and recreating the loadbalancer.
|
||||
|
||||
__Unexpected updates__: Since glbc constantly runs a control loop it won't allow you to break links that black hole traffic. An easy link to break is the url map itself, but you can also disconnect a target proxy from the urlmap, or remove an instance from the instance group (note this is different from *deleting* the instance, the loadbalancer controller will not recreate it if you do so). Modify one of the url links in the map to point to another backend through the GCE Control Panel UI, and wait till the controller sync (this happens as frequently as you tell it to, via the --resync-period flag). The same goes for the Kubernetes side of things, the API server will validate against obviously bad updates, but if you relink an Ingress so it points to the wrong backends the controller will blindly follow.
|
||||
|
||||
### Paths
|
||||
|
||||
Till now, our examples were simplified in that they hit an endpoint with a catch-all path regex. Most real world backends have subresources. Let's create a service to test how the loadbalancer handles paths:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: nginxtest
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginxtest
|
||||
spec:
|
||||
containers:
|
||||
- name: nginxtest
|
||||
image: bprashanth/nginxtest:1.0
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginxtest
|
||||
labels:
|
||||
app: nginxtest
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: nginxtest
|
||||
```
|
||||
|
||||
Running kubectl create against this manifest will give you a service with multiple endpoints:
|
||||
```shell
|
||||
$ kubectl get svc nginxtest -o yaml | grep -i nodeport:
|
||||
nodePort: 30404
|
||||
$ curl nodeip:30404/
|
||||
ENDPOINTS
|
||||
=========
|
||||
<a href="hostname">hostname</a>: An endpoint to query the hostname.
|
||||
<a href="stress">stress</a>: An endpoint to stress the host.
|
||||
<a href="fs/index.html">fs</a>: A file system for static content.
|
||||
|
||||
```
|
||||
You can put the nodeip:port into your browser and play around with the endpoints so you're familiar with what to expect. We will test the `/hostname` and `/fs/files/nginx.html` endpoints. Modify/create your Ingress:
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginxtest-ingress
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /hostname
|
||||
backend:
|
||||
serviceName: nginxtest
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
And check the endpoint (you will have to wait till the update takes effect, this could be a few minutes):
|
||||
```shell
|
||||
$ kubectl replace -f ingress.yaml
|
||||
$ curl loadbalancerip/hostname
|
||||
nginx-tester-pod-name
|
||||
```
|
||||
|
||||
Note what just happened, the endpoint exposes /hostname, and the loadbalancer forwarded the entire matching url to the endpoint. This means if you had '/foo' in the Ingress and tried accessing /hostname, your endpoint would've received /foo/hostname and not known how to route it. Now update the Ingress to access static content via the /fs endpoint:
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginxtest-ingress
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /fs/*
|
||||
backend:
|
||||
serviceName: nginxtest
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
As before, wait a while for the update to take effect, and try accessing `loadbalancerip/fs/files/nginx.html`.
|
||||
|
||||
#### Deletion
|
||||
|
||||
Most production loadbalancers live as long as the nodes in the cluster and are torn down when the nodes are destroyed. That said, there are plenty of use cases for deleting an Ingress, deleting a loadbalancer controller, or just purging external loadbalancer resources altogether. Deleting a loadbalancer controller pod will not affect the loadbalancers themselves, this way your backends won't suffer a loss of availability if the scheduler pre-empts your controller pod. Deleting a single loadbalancer is as easy as deleting an Ingress via kubectl:
|
||||
```shell
|
||||
$ kubectl delete ing echomap
|
||||
$ kubectl logs --follow glbc-6m6b6 l7-lb-controller
|
||||
I1007 00:25:45.099429 1 loadbalancer.go:144] Deleting lb default-echomap
|
||||
I1007 00:25:45.099432 1 loadbalancer.go:437] Deleting global forwarding rule k8-fw-default-echomap
|
||||
I1007 00:25:54.885823 1 loadbalancer.go:444] Deleting target proxy k8-tp-default-echomap
|
||||
I1007 00:25:58.446941 1 loadbalancer.go:451] Deleting url map k8-um-default-echomap
|
||||
I1007 00:26:02.043065 1 backends.go:176] Deleting backends []
|
||||
I1007 00:26:02.043188 1 backends.go:134] Deleting backend k8-be-30301
|
||||
I1007 00:26:05.591140 1 backends.go:134] Deleting backend k8-be-30284
|
||||
I1007 00:26:09.159016 1 controller.go:232] Finished syncing default/echomap
|
||||
```
|
||||
Note that it takes ~30 seconds to purge cloud resources; the API calls to create and delete are a one-time cost. GCE BackendServices are ref-counted and deleted by the controller as you delete Kubernetes Ingresses. This is not sufficient for cleanup, because you might have deleted the Ingress while glbc was down, in which case it would leak cloud resources. You can delete the glbc and purge cloud resources in 2 more ways:
|
||||
|
||||
__The dev/test way__: If you want to delete everything in the cloud when the loadbalancer controller pod dies, start it with the --delete-all-on-quit flag. When a pod is killed it's first sent a SIGTERM, followed by a grace period (set to 10 minutes for loadbalancer controllers), followed by a SIGKILL. The controller pod uses this time to delete cloud resources. Be careful with --delete-all-on-quit, because if you're running a production glbc and the scheduler re-schedules your pod for some reason, it will result in a loss of availability. You can do this because your rc.yaml has:
|
||||
```yaml
|
||||
args:
|
||||
# auto quit requires a high termination grace period.
|
||||
- --delete-all-on-quit=true
|
||||
```
|
||||
|
||||
So simply delete the replication controller:
|
||||
```shell
|
||||
$ kubectl get rc glbc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE
|
||||
glbc default-http-backend gcr.io/google_containers/defaultbackend:1.0 k8s-app=glbc,version=v0.5 1 2m
|
||||
l7-lb-controller gcr.io/google_containers/glbc:0.9.6
|
||||
|
||||
$ kubectl delete rc glbc
|
||||
replicationcontroller "glbc" deleted
|
||||
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
glbc-6m6b6 1/1 Terminating 0 13m
|
||||
```
|
||||
|
||||
__The prod way__: If you didn't start the controller with `--delete-all-on-quit`, you can execute a GET on the `/delete-all-and-quit` endpoint. This endpoint is deliberately not exported.
|
||||
|
||||
```shell
|
||||
$ kubectl exec -it glbc-6m6b6 -- wget -q -O- http://localhost:8081/delete-all-and-quit
|
||||
..Hangs till quit is done..
|
||||
|
||||
$ kubectl logs glbc-6m6b6 --follow
|
||||
I1007 00:26:09.159016 1 controller.go:232] Finished syncing default/echomap
|
||||
I1007 00:29:30.321419 1 controller.go:192] Shutting down controller queues.
|
||||
I1007 00:29:30.321970 1 controller.go:199] Shutting down cluster manager.
|
||||
I1007 00:29:30.321574 1 controller.go:178] Shutting down Loadbalancer Controller
|
||||
I1007 00:29:30.322378 1 main.go:160] Handled quit, awaiting pod deletion.
|
||||
I1007 00:29:30.321977 1 loadbalancer.go:154] Creating loadbalancers []
|
||||
I1007 00:29:30.322617 1 loadbalancer.go:192] Loadbalancer pool shutdown.
|
||||
I1007 00:29:30.322622 1 backends.go:176] Deleting backends []
|
||||
I1007 00:30:00.322528 1 main.go:160] Handled quit, awaiting pod deletion.
|
||||
I1007 00:30:30.322751 1 main.go:160] Handled quit, awaiting pod deletion
|
||||
```
|
||||
|
||||
You just instructed the loadbalancer controller to quit, however if it had done so, the replication controller would've just created another pod, so it waits around till you delete the rc.
|
||||
|
||||
#### Health checks
|
||||
|
||||
Currently, all service backends must satisfy *either* of the following requirements to pass the HTTP(S) health checks sent to it from the GCE loadbalancer:
|
||||
1. Respond with a 200 on '/'. The content does not matter.
|
||||
2. Expose an arbitrary url as a `readiness` probe on the pods backing the Service.
|
||||
|
||||
The Ingress controller looks for a compatible readiness probe first; if it finds one, it adopts it as the GCE loadbalancer's HTTP(S) health check. If there's no readiness probe, or the readiness probe requires special HTTP headers, the Ingress controller points the GCE loadbalancer's HTTP health check at '/'. [This is an example](examples/health_checks/README.md) of an Ingress that adopts the readiness probe from the endpoints as its health check.
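
For example, a pod template satisfying the second requirement with a readiness probe that the controller can adopt (the probe path is a placeholder):

```yaml
spec:
  containers:
  - name: echoheaders
    image: gcr.io/google_containers/echoserver:1.3
    ports:
    - containerPort: 8080
    readinessProbe:
      httpGet:
        path: /healthz
        port: 8080
```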
|
||||
|
||||
## Frontend HTTPS
|
||||
For encrypted communication between the client and the load balancer, you can secure an Ingress by specifying a [secret](http://kubernetes.io/docs/user-guide/secrets) that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. This controller does not support SNI, so it will ignore all but the first cert in the TLS configuration section. The TLS secret must [contain keys](https://github.com/kubernetes/kubernetes/blob/master/pkg/api/types.go#L2696) named `tls.crt` and `tls.key` that contain the certificate and private key to use for TLS, e.g.:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: testsecret
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
tls.crt: base64 encoded cert
|
||||
tls.key: base64 encoded key
|
||||
```
|
||||
|
||||
Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS.
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: no-rules-map
|
||||
spec:
|
||||
tls:
|
||||
- secretName: testsecret
|
||||
backend:
|
||||
serviceName: s1
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
This creates 2 GCE forwarding rules that use a single static ip. Both `:80` and `:443` will direct traffic to your backend, which serves HTTP requests on the target port mentioned in the Service associated with the Ingress.
|
||||
|
||||
## Backend HTTPS
|
||||
For encrypted communication between the load balancer and your Kubernetes service, you need to decorate the service's port as expecting HTTPS. There's an alpha [Service annotation](examples/backside_https/app.yaml) for specifying the expected protocol per service port. Upon seeing the protocol as HTTPS, the ingress controller will assemble a GCP L7 load balancer with an HTTPS backend-service with a HTTPS health check.
|
||||
|
||||
The annotation value is a stringified JSON map of port-name to "HTTPS" or "HTTP". If you do not specify the port, "HTTP" is assumed.
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: my-echo-svc
|
||||
annotations:
|
||||
service.alpha.kubernetes.io/app-protocols: '{"my-https-port":"HTTPS"}'
|
||||
labels:
|
||||
app: echo
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
name: my-https-port
|
||||
selector:
|
||||
app: echo
|
||||
```
|
||||
|
||||
#### Redirecting HTTP to HTTPS
|
||||
|
||||
To redirect traffic from `:80` to `:443` you need to examine the `x-forwarded-proto` header inserted by the GCE L7, since the Ingress does not support redirect rules. In nginx, this is as simple as adding the following lines to your config:
|
||||
```nginx
|
||||
# Replace '_' with your hostname.
|
||||
server_name _;
|
||||
if ($http_x_forwarded_proto = "http") {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
```
|
||||
|
||||
Here's an example that demonstrates it. First, let's create a self-signed certificate valid for up to a year:
|
||||
|
||||
```console
|
||||
$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foobar.com"
|
||||
$ kubectl create secret tls tls-secret --key=/tmp/tls.key --cert=/tmp/tls.crt
|
||||
secret "tls-secret" created
|
||||
```
|
||||
|
||||
Then the Services/Ingress to use it:
|
||||
|
||||
```yaml
|
||||
$ echo "
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheaders-https
|
||||
labels:
|
||||
app: echoheaders-https
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders-https
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: echoheaders-https
|
||||
spec:
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echoheaders-https
|
||||
spec:
|
||||
containers:
|
||||
- name: echoheaders-https
|
||||
image: gcr.io/google_containers/echoserver:1.3
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: test
|
||||
spec:
|
||||
tls:
|
||||
- secretName: tls-secret
|
||||
backend:
|
||||
serviceName: echoheaders-https
|
||||
servicePort: 80
|
||||
" | kubectl create -f -
|
||||
```
|
||||
|
||||
This creates 2 GCE forwarding rules that use a single static ip. Port `80` redirects to port `443` which terminates TLS and sends traffic to your backend.
|
||||
|
||||
```console
|
||||
$ kubectl get ing
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
test * 80, 443 5s
|
||||
|
||||
$ kubectl describe ing
|
||||
Name: test
|
||||
Namespace: default
|
||||
Address: 130.211.21.233
|
||||
Default backend: echoheaders-https:80 (10.180.1.7:8080,10.180.2.3:8080)
|
||||
TLS:
|
||||
tls-secret terminates
|
||||
Rules:
|
||||
Host Path Backends
|
||||
---- ---- --------
|
||||
* * echoheaders-https:80 (10.180.1.7:8080,10.180.2.3:8080)
|
||||
Annotations:
|
||||
url-map: k8s-um-default-test--7d2d86e772b6c246
|
||||
backends: {"k8s-be-32327--7d2d86e772b6c246":"HEALTHY"}
|
||||
forwarding-rule: k8s-fw-default-test--7d2d86e772b6c246
|
||||
https-forwarding-rule: k8s-fws-default-test--7d2d86e772b6c246
|
||||
https-target-proxy: k8s-tps-default-test--7d2d86e772b6c246
|
||||
static-ip: k8s-fw-default-test--7d2d86e772b6c246
|
||||
target-proxy: k8s-tp-default-test--7d2d86e772b6c246
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
12m 12m 1 {loadbalancer-controller } Normal ADD default/test
|
||||
4m 4m 1 {loadbalancer-controller } Normal CREATE ip: 130.211.21.233
|
||||
```
|
||||
|
||||
Testing reachability:
|
||||
```console
|
||||
$ curl 130.211.21.233 -kL
|
||||
CLIENT VALUES:
|
||||
client_address=10.240.0.4
|
||||
command=GET
|
||||
real path=/
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://130.211.21.233:8080/
|
||||
...
|
||||
|
||||
$ curl --resolve foobar.in:443:130.211.21.233 https://foobar.in --cacert /tmp/tls.crt
|
||||
CLIENT VALUES:
|
||||
client_address=10.240.0.4
|
||||
command=GET
|
||||
real path=/
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://bitrot.com:8080/
|
||||
...
|
||||
|
||||
$ curl --resolve bitrot.in:443:130.211.21.233 https://foobar.in --cacert /tmp/tls.crt
|
||||
curl: (51) SSL: certificate subject name 'foobar.in' does not match target host name 'foobar.in'
|
||||
```
|
||||
|
||||
Note that the GCLB health checks *do not* get the `301` because they don't include `x-forwarded-proto`.
|
||||
|
||||
#### Blocking HTTP
|
||||
|
||||
You can block traffic on `:80` through an annotation. You might want to do this if all your clients are only going to hit the loadbalancer through https and you don't want to waste the extra GCE forwarding rule, eg:
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: test
|
||||
annotations:
|
||||
kubernetes.io/ingress.allow-http: "false"
|
||||
spec:
|
||||
tls:
|
||||
# This assumes tls-secret exists.
|
||||
# To generate it run the make in this directory.
|
||||
- secretName: tls-secret
|
||||
backend:
|
||||
serviceName: echoheaders-https
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
Upon describing it you should only see a single GCE forwarding rule:
|
||||
```console
|
||||
$ kubectl describe ing
|
||||
Name: test
|
||||
Namespace: default
|
||||
Address: 130.211.10.121
|
||||
Default backend: echoheaders-https:80 (10.245.2.4:8080,10.245.3.4:8080)
|
||||
TLS:
|
||||
tls-secret terminates
|
||||
Rules:
|
||||
Host Path Backends
|
||||
---- ---- --------
|
||||
Annotations:
|
||||
https-target-proxy: k8s-tps-default-test--uid
|
||||
url-map: k8s-um-default-test--uid
|
||||
backends: {"k8s-be-31644--uid":"Unknown"}
|
||||
https-forwarding-rule: k8s-fws-default-test--uid
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
13m 13m 1 {loadbalancer-controller } Normal ADD default/test
|
||||
12m 12m 1 {loadbalancer-controller } Normal CREATE ip: 130.211.10.121
|
||||
```
|
||||
|
||||
And curling `:80` should just `404`:
|
||||
```console
|
||||
$ curl 130.211.10.121
|
||||
...
|
||||
<a href=//www.google.com/><span id=logo aria-label=Google></span></a>
|
||||
<p><b>404.</b> <ins>That’s an error.</ins>
|
||||
|
||||
$ curl https://130.211.10.121 -k
|
||||
...
|
||||
SERVER VALUES:
|
||||
server_version=nginx: 1.9.11 - lua: 10001
|
||||
```
|
||||
|
||||
## Troubleshooting:
|
||||
|
||||
This controller is complicated because it exposes a tangled set of external resources as a single logical abstraction. It's recommended that you are at least *aware* of how one creates a GCE L7 [without a kubernetes Ingress](https://cloud.google.com/container-engine/docs/tutorials/http-balancer). If weird things happen, here are some basic debugging guidelines:
|
||||
|
||||
* Check loadbalancer controller pod logs via kubectl
|
||||
A typical sign of trouble is repeated retries in the logs:
|
||||
```shell
|
||||
I1006 18:58:53.451869 1 loadbalancer.go:268] Forwarding rule k8-fw-default-echomap already exists
|
||||
I1006 18:58:53.451955 1 backends.go:162] Syncing backends [30301 30284 30301]
|
||||
I1006 18:58:53.451998 1 backends.go:134] Deleting backend k8-be-30302
|
||||
E1006 18:58:57.029253 1 utils.go:71] Requeuing default/echomap, err googleapi: Error 400: The backendService resource 'projects/Kubernetesdev/global/backendServices/k8-be-30302' is already being used by 'projects/Kubernetesdev/global/urlMaps/k8-um-default-echomap'
|
||||
I1006 18:58:57.029336 1 utils.go:83] Syncing default/echomap
|
||||
```
|
||||
|
||||
This could be a bug or quota limitation. In the case of the former, please head over to slack or github.
|
||||
|
||||
* If you see a GET hanging, followed by a 502 with the following response:
|
||||
|
||||
```
|
||||
<html><head>
|
||||
<meta http-equiv="content-type" content="text/html;charset=utf-8">
|
||||
<title>502 Server Error</title>
|
||||
</head>
|
||||
<body text=#000000 bgcolor=#ffffff>
|
||||
<h1>Error: Server Error</h1>
|
||||
<h2>The server encountered a temporary error and could not complete your request.<p>Please try again in 30 seconds.</h2>
|
||||
<h2></h2>
|
||||
</body></html>
|
||||
```
|
||||
The loadbalancer is probably bootstrapping itself.
|
||||
|
||||
* If a GET responds with a 404 and the following response:
|
||||
```
|
||||
<a href=//www.google.com/><span id=logo aria-label=Google></span></a>
|
||||
<p><b>404.</b> <ins>That’s an error.</ins>
|
||||
<p>The requested URL <code>/hostless</code> was not found on this server. <ins>That’s all we know.</ins>
|
||||
```
|
||||
It means you have lost your IP somehow, or just typed in the wrong IP.
|
||||
|
||||
* If you see requests taking an abnormal amount of time, run the echoheaders pod and look for the client address
|
||||
```shell
|
||||
CLIENT VALUES:
|
||||
client_address=('10.240.29.196', 56401) (10.240.29.196)
|
||||
```
|
||||
|
||||
Then head over to the GCE node with internal ip 10.240.29.196 and check that the [Service is functioning](https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/debugging-services.md) as expected. Remember that the GCE L7 is routing you through the NodePort service, and try to trace back.
|
||||
|
||||
* Check if you can access the backend service directly via nodeip:nodeport
|
||||
* Check the GCE console
|
||||
* Make sure you only have a single loadbalancer controller running
|
||||
* Make sure the initial GCE health checks have passed
|
||||
* A crash loop looks like:
|
||||
```shell
|
||||
$ kubectl get pods
|
||||
glbc-fjtlq 0/1 CrashLoopBackOff 17 1h
|
||||
```
|
||||
If you hit that it means the controller isn't even starting. Re-check your input flags, especially the required ones.
|
||||
|
||||
## Creating the firewall rule for GLBC health checks
|
||||
|
||||
A default GKE/GCE cluster needs at least 1 firewall rule for GLBC to function. The Ingress controller should create this for you automatically. You can also create it thus:
|
||||
```console
|
||||
$ gcloud compute firewall-rules create allow-130-211-0-0-22 \
|
||||
--source-ranges 130.211.0.0/22,35.191.0.0/16 \
|
||||
--target-tags $TAG \
|
||||
--allow tcp:$NODE_PORT
|
||||
```
|
||||
|
||||
Where `130.211.0.0/22` and `35.191.0.0/16` are the source ranges of the GCE L7, `$NODE_PORT` is the node port your Service is exposed on, i.e:
|
||||
```console
|
||||
$ kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services ${SERVICE_NAME}
|
||||
```
|
||||
|
||||
and `$TAG` is an optional list of GKE instance tags, i.e:
|
||||
```console
|
||||
$ kubectl get nodes | awk '{print $1}' | tail -n +2 | grep -Po 'gke-[0-9,a-z]+-[0-9,a-z]+-node' | uniq
|
||||
```
|
||||
|
||||
## GLBC Implementation Details
|
||||
|
||||
For the curious, here is a high level overview of how the GCE LoadBalancer controller manages cloud resources.
|
||||
|
||||
The controller manages cloud resources through a notion of pools. Each pool is the representation of the last known state of a logical cloud resource. Pools are periodically synced with the desired state, as reflected by the Kubernetes api. When you create a new Ingress, the following happens:
|
||||
* Create BackendServices for each Kubernetes backend in the Ingress, through the backend pool.
|
||||
* Add nodePorts for each BackendService to an Instance Group with all the instances in your cluster, through the instance pool.
|
||||
* Create a UrlMap, TargetHttpProxy, Global Forwarding Rule through the loadbalancer pool.
|
||||
* Update the loadbalancer's urlmap according to the Ingress.
|
||||
|
||||
Periodically, each pool checks that it has a valid connection to the next hop in the above resource graph. So for example, the backend pool will check that each backend is connected to the instance group and that the node ports match, the instance group will check that all the Kubernetes nodes are a part of the instance group, and so on. Since Backends are a limited resource, they're shared (well, everything is limited by your quota, this applies doubly to backend services). This means you can set up N Ingresses exposing M services through different paths and the controller will only create M backends. When all the Ingresses are deleted, the backend pool GCs the backend.
|
||||
|
||||
## Wish list:
|
||||
|
||||
* More E2e, integration tests
|
||||
* Better events
|
||||
* Detect leaked resources even if the Ingress has been deleted when the controller isn't around
|
||||
* Specify health checks (currently we just rely on kubernetes service/pod liveness probes and force pods to have a `/` endpoint that responds with 200 for GCE)
|
||||
* Alleviate the NodePort requirement for Service Type=LoadBalancer.
|
||||
* Async pool management of backends/L7s etc
|
||||
* Retry back-off when GCE Quota is done
|
||||
* GCE Quota integration
|
||||
|
||||
|
|
@ -1,491 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/ingress/controllers/gce/instances"
|
||||
"k8s.io/ingress/controllers/gce/storage"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// BalancingMode represents the loadbalancing configuration of an individual
|
||||
// Backend in a BackendService. This is *effectively* a cluster wide setting
|
||||
// since you can't mix modes across Backends pointing to the same IG, and you
|
||||
// can't have a single node in more than 1 loadbalanced IG.
|
||||
type BalancingMode string
|
||||
|
||||
const (
|
||||
// Rate balances incoming requests based on observed RPS.
|
||||
// As of this writing, it's the only balancing mode supported by GCE's
|
||||
// internal LB. This setting doesn't make sense for Kubernetes clusters
|
||||
// because requests can get proxied between instance groups in different
|
||||
// zones by kube-proxy without GCE even knowing it. Setting equal RPS on
|
||||
// all IGs should achieve roughly equal distribution of requests.
|
||||
Rate BalancingMode = "RATE"
|
||||
// Utilization balances incoming requests based on observed utilization.
|
||||
// This mode is only useful if you want to divert traffic away from IGs
|
||||
// running other compute intensive workloads. Utilization statistics are
|
||||
// aggregated per instance, not per container, and requests can get proxied
|
||||
// between instance groups in different zones by kube-proxy without GCE even
|
||||
// knowing about it.
|
||||
Utilization BalancingMode = "UTILIZATION"
|
||||
// Connections balances incoming requests based on a connection counter.
|
||||
// This setting currently doesn't make sense for Kubernetes clusters,
|
||||
// because we use NodePort Services as HTTP LB backends, so GCE's connection
|
||||
// counters don't accurately represent connections per container.
|
||||
Connections BalancingMode = "CONNECTION"
|
||||
)
|
||||
|
||||
// maxRPS is the RPS setting for all Backends with BalancingMode RATE. The exact
|
||||
// value doesn't matter, as long as it's the same for all Backends. Requests
|
||||
// received by GCLB above this RPS are NOT dropped, GCLB continues to distribute
|
||||
// them across IGs.
|
||||
// TODO: Should this be math.MaxInt64?
|
||||
const maxRPS = 1
|
||||
|
||||
// Backends implements BackendPool.
|
||||
type Backends struct {
|
||||
cloud BackendServices
|
||||
nodePool instances.NodePool
|
||||
healthChecker healthchecks.HealthChecker
|
||||
snapshotter storage.Snapshotter
|
||||
prober probeProvider
|
||||
// ignoredPorts are a set of ports excluded from GC, even
|
||||
// after the Ingress has been deleted. Note that invoking
|
||||
// a Delete() on these ports will still delete the backend.
|
||||
ignoredPorts sets.String
|
||||
namer *utils.Namer
|
||||
}
|
||||
|
||||
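// portKey returns the string key used to index pools by port.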
func portKey(port int64) string {
|
||||
return fmt.Sprintf("%d", port)
|
||||
}
|
||||
|
||||
// ServicePort for tupling port and protocol
|
||||
type ServicePort struct {
|
||||
Port int64
|
||||
Protocol utils.AppProtocol
|
||||
SvcName types.NamespacedName
|
||||
SvcPort intstr.IntOrString
|
||||
}
|
||||
|
||||
// Description returns a string describing the ServicePort.
|
||||
func (sp ServicePort) Description() string {
|
||||
if sp.SvcName.String() == "" || sp.SvcPort.String() == "" {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf(`{"kubernetes.io/service-name":"%s","kubernetes.io/service-port":"%s"}`, sp.SvcName.String(), sp.SvcPort.String())
|
||||
}
|
||||
|
||||
// NewBackendPool returns a new backend pool.
|
||||
// - cloud: implements BackendServices and syncs backends with a cloud provider
|
||||
// - healthChecker: is capable of producing health checks for backends.
|
||||
// - nodePool: implements NodePool, used to create/delete new instance groups.
|
||||
// - namer: produces names for backends.
|
||||
// - ignorePorts: is a set of ports to avoid syncing/GCing.
|
||||
// - resyncWithCloud: if true, periodically syncs with cloud resources.
|
||||
func NewBackendPool(
|
||||
cloud BackendServices,
|
||||
healthChecker healthchecks.HealthChecker,
|
||||
nodePool instances.NodePool,
|
||||
namer *utils.Namer,
|
||||
ignorePorts []int64,
|
||||
resyncWithCloud bool) *Backends {
|
||||
|
||||
ignored := []string{}
|
||||
for _, p := range ignorePorts {
|
||||
ignored = append(ignored, portKey(p))
|
||||
}
|
||||
backendPool := &Backends{
|
||||
cloud: cloud,
|
||||
nodePool: nodePool,
|
||||
healthChecker: healthChecker,
|
||||
namer: namer,
|
||||
ignoredPorts: sets.NewString(ignored...),
|
||||
}
|
||||
if !resyncWithCloud {
|
||||
backendPool.snapshotter = storage.NewInMemoryPool()
|
||||
return backendPool
|
||||
}
|
||||
backendPool.snapshotter = storage.NewCloudListingPool(
|
||||
func(i interface{}) (string, error) {
|
||||
bs := i.(*compute.BackendService)
|
||||
if !namer.NameBelongsToCluster(bs.Name) {
|
||||
return "", fmt.Errorf("unrecognized name %v", bs.Name)
|
||||
}
|
||||
port, err := namer.BePort(bs.Name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return port, nil
|
||||
},
|
||||
backendPool,
|
||||
30*time.Second,
|
||||
)
|
||||
return backendPool
|
||||
}
|
||||
|
||||
// Init sets the probeProvider interface value
|
||||
func (b *Backends) Init(pp probeProvider) {
|
||||
b.prober = pp
|
||||
}
|
||||
|
||||
// Get returns a single backend.
|
||||
func (b *Backends) Get(port int64) (*compute.BackendService, error) {
|
||||
be, err := b.cloud.GetGlobalBackendService(b.namer.BeName(port))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.snapshotter.Add(portKey(port), be)
|
||||
return be, nil
|
||||
}
|
||||
|
||||
func (b *Backends) ensureHealthCheck(sp ServicePort) (string, error) {
|
||||
hc := b.healthChecker.New(sp.Port, sp.Protocol)
|
||||
|
||||
existingLegacyHC, err := b.healthChecker.GetLegacy(sp.Port)
|
||||
if err != nil && !utils.IsNotFoundError(err) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if existingLegacyHC != nil {
|
||||
glog.V(4).Infof("Applying settings of existing health check to newer health check on port %+v", sp)
|
||||
applyLegacyHCToHC(existingLegacyHC, hc)
|
||||
} else if b.prober != nil {
|
||||
probe, err := b.prober.GetProbe(sp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if probe != nil {
|
||||
glog.V(4).Infof("Applying httpGet settings of readinessProbe to health check on port %+v", sp)
|
||||
applyProbeSettingsToHC(probe, hc)
|
||||
}
|
||||
}
|
||||
|
||||
return b.healthChecker.Sync(hc)
|
||||
}
|
||||
|
||||
func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp ServicePort, name string) (*compute.BackendService, error) {
|
||||
bs := &compute.BackendService{
|
||||
Name: name,
|
||||
Description: sp.Description(),
|
||||
Protocol: string(sp.Protocol),
|
||||
HealthChecks: []string{hcLink},
|
||||
Port: namedPort.Port,
|
||||
PortName: namedPort.Name,
|
||||
}
|
||||
if err := b.cloud.CreateGlobalBackendService(bs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Get(namedPort.Port)
|
||||
}
|
||||
|
||||
// Ensure will update or create Backends for the given ports.
|
||||
// Uses the given instance groups if non-nil, else creates instance groups.
|
||||
func (b *Backends) Ensure(svcPorts []ServicePort, igs []*compute.InstanceGroup) error {
|
||||
glog.V(3).Infof("Sync: backends %v", svcPorts)
|
||||
// Ideally callers should pass the instance groups to prevent recomputing them here.
|
||||
// Igs can be nil in scenarios where we do not have instance groups such as
|
||||
// while syncing default backend service.
|
||||
if igs == nil {
|
||||
ports := []int64{}
|
||||
for _, p := range svcPorts {
|
||||
ports = append(ports, p.Port)
|
||||
}
|
||||
var err error
|
||||
igs, _, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, ports)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// create backends for new ports, perform an edge hop for existing ports
|
||||
for _, port := range svcPorts {
|
||||
if err := b.ensureBackendService(port, igs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureBackendService will update or create a Backend for the given port.
|
||||
// It assumes that the instance groups have been created and required named port has been added.
|
||||
// If not, then Ensure should be called instead.
|
||||
func (b *Backends) ensureBackendService(p ServicePort, igs []*compute.InstanceGroup) error {
|
||||
// We must track the ports even if creating the backends failed, because
|
||||
// we might've created health-check for them.
|
||||
be := &compute.BackendService{}
|
||||
defer func() { b.snapshotter.Add(portKey(p.Port), be) }()
|
||||
|
||||
var err error
|
||||
|
||||
// Ensure health check for backend service exists
|
||||
hcLink, err := b.ensureHealthCheck(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify existence of a backend service for the proper port, but do not specify any backends/igs
|
||||
pName := b.namer.BeName(p.Port)
|
||||
be, _ = b.Get(p.Port)
|
||||
if be == nil {
|
||||
namedPort := utils.GetNamedPort(p.Port)
|
||||
glog.V(2).Infof("Creating backend service for port %v named port %v", p.Port, namedPort)
|
||||
be, err = b.create(namedPort, hcLink, p, pName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the backend service has the correct protocol and health check link
|
||||
existingHCLink := ""
|
||||
if len(be.HealthChecks) == 1 {
|
||||
existingHCLink = be.HealthChecks[0]
|
||||
}
|
||||
if be.Protocol != string(p.Protocol) || existingHCLink != hcLink || be.Description != p.Description() {
|
||||
glog.V(2).Infof("Updating backend protocol %v (%v) for change in protocol (%v) or health check", pName, be.Protocol, string(p.Protocol))
|
||||
be.Protocol = string(p.Protocol)
|
||||
be.HealthChecks = []string{hcLink}
|
||||
be.Description = p.Description()
|
||||
if err = b.cloud.UpdateGlobalBackendService(be); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If previous health check was legacy type, we need to delete it.
|
||||
if existingHCLink != hcLink && strings.Contains(existingHCLink, "/httpHealthChecks/") {
|
||||
if err = b.healthChecker.DeleteLegacy(p.Port); err != nil {
|
||||
glog.Warningf("Failed to delete legacy HttpHealthCheck %v; Will not try again, err: %v", pName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// we won't find any igs till the node pool syncs nodes.
|
||||
if len(igs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify that backend service contains links to all backends/instance-groups
|
||||
return b.edgeHop(be, igs)
|
||||
}
|
||||
|
||||
// Delete deletes the Backend for the given port.
|
||||
func (b *Backends) Delete(port int64) (err error) {
|
||||
name := b.namer.BeName(port)
|
||||
glog.V(2).Infof("Deleting backend service %v", name)
|
||||
defer func() {
|
||||
if utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
err = nil
|
||||
}
|
||||
if err == nil {
|
||||
b.snapshotter.Delete(portKey(port))
|
||||
}
|
||||
}()
|
||||
// Try deleting health checks even if a backend is not found.
|
||||
if err = b.cloud.DeleteGlobalBackendService(name); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
|
||||
return b.healthChecker.Delete(port)
|
||||
}
|
||||
|
||||
// List lists all backends.
|
||||
func (b *Backends) List() ([]interface{}, error) {
|
||||
// TODO: for consistency with the rest of this sub-package this method
|
||||
// should return a list of backend ports.
|
||||
interList := []interface{}{}
|
||||
be, err := b.cloud.ListGlobalBackendServices()
|
||||
if err != nil {
|
||||
return interList, err
|
||||
}
|
||||
for i := range be.Items {
|
||||
interList = append(interList, be.Items[i])
|
||||
}
|
||||
return interList, nil
|
||||
}
|
||||
|
||||
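// getBackendsForIGs returns a compute Backend for each given instance group, configured with the supplied balancing mode.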
func getBackendsForIGs(igs []*compute.InstanceGroup, bm BalancingMode) []*compute.Backend {
|
||||
var backends []*compute.Backend
|
||||
for _, ig := range igs {
|
||||
b := &compute.Backend{
|
||||
Group: ig.SelfLink,
|
||||
BalancingMode: string(bm),
|
||||
}
|
||||
switch bm {
|
||||
case Rate:
|
||||
b.MaxRatePerInstance = maxRPS
|
||||
default:
|
||||
// TODO: Set utilization and connection limits when we accept them
|
||||
// as valid fields.
|
||||
}
|
||||
|
||||
backends = append(backends, b)
|
||||
}
|
||||
return backends
|
||||
}
|
||||
|
||||
// edgeHop checks the links of the given backend by executing an edge hop.
|
||||
// It fixes broken links.
|
||||
func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGroup) error {
|
||||
beIGs := sets.String{}
|
||||
for _, beToIG := range be.Backends {
|
||||
beIGs.Insert(beToIG.Group)
|
||||
}
|
||||
igLinks := sets.String{}
|
||||
for _, igToBE := range igs {
|
||||
igLinks.Insert(igToBE.SelfLink)
|
||||
}
|
||||
if beIGs.IsSuperset(igLinks) {
|
||||
return nil
|
||||
}
|
||||
glog.V(2).Infof("Updating backend service %v with %d backends: expected igs %+v, current igs %+v",
|
||||
be.Name, igLinks.Len(), igLinks.List(), beIGs.List())
|
||||
|
||||
originalBackends := be.Backends
|
||||
var addIGs []*compute.InstanceGroup
|
||||
for _, ig := range igs {
|
||||
if !beIGs.Has(ig.SelfLink) {
|
||||
addIGs = append(addIGs, ig)
|
||||
}
|
||||
}
|
||||
|
||||
// We first try to create the backend with balancingMode=RATE. If this
|
||||
// fails, it's most likely because there are existing backends with
|
||||
// balancingMode=UTILIZATION. This failure mode throws a googleapi error
|
||||
// which wraps an HTTP 400 status code. We handle it in the loop below
|
||||
// and come around to retry with the right balancing mode. The goal is to
|
||||
// switch everyone to using RATE.
|
||||
var errs []string
|
||||
for _, bm := range []BalancingMode{Rate, Utilization} {
|
||||
// Generate backends with given instance groups with a specific mode
|
||||
newBackends := getBackendsForIGs(addIGs, bm)
|
||||
be.Backends = append(originalBackends, newBackends...)
|
||||
|
||||
if err := b.cloud.UpdateGlobalBackendService(be); err != nil {
|
||||
if utils.IsHTTPErrorCode(err, http.StatusBadRequest) {
|
||||
glog.V(2).Infof("Updating backend service backends with balancing mode %v failed, will try another mode. err:%v", bm, err)
|
||||
errs = append(errs, err.Error())
|
||||
// This is probably a failure because we tried to create the backend
|
||||
// with balancingMode=RATE when there are already backends with
|
||||
// balancingMode=UTILIZATION. Just ignore it and retry setting
|
||||
// balancingMode=UTILIZATION (b/35102911).
|
||||
continue
|
||||
}
|
||||
glog.V(2).Infof("Error updating backend service backends with balancing mode %v:%v", bm, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("received errors when updating backend service: %v", strings.Join(errs, "\n"))
|
||||
}
|
||||
|
||||
// GC garbage collects services corresponding to ports in the given list.
|
||||
func (b *Backends) GC(svcNodePorts []ServicePort) error {
|
||||
knownPorts := sets.NewString()
|
||||
for _, p := range svcNodePorts {
|
||||
knownPorts.Insert(portKey(p.Port))
|
||||
}
|
||||
pool := b.snapshotter.Snapshot()
|
||||
for port := range pool {
|
||||
p, err := strconv.ParseUint(port, 10, 16)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nodePort := int64(p)
|
||||
if knownPorts.Has(portKey(nodePort)) || b.ignoredPorts.Has(portKey(nodePort)) {
|
||||
continue
|
||||
}
|
||||
glog.V(3).Infof("GCing backend for port %v", p)
|
||||
if err := b.Delete(nodePort); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown deletes all backends and the default backend.
|
||||
// This will fail if one of the backends is being used by another resource.
|
||||
func (b *Backends) Shutdown() error {
|
||||
if err := b.GC([]ServicePort{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status returns the status of the given backend by name.
|
||||
func (b *Backends) Status(name string) string {
|
||||
backend, err := b.cloud.GetGlobalBackendService(name)
|
||||
if err != nil || len(backend.Backends) == 0 {
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
// TODO: Look at more than one backend's status
|
||||
// TODO: Include port, ip in the status, since it's in the health info.
|
||||
hs, err := b.cloud.GetGlobalBackendServiceHealth(name, backend.Backends[0].Group)
|
||||
if err != nil || len(hs.HealthStatus) == 0 || hs.HealthStatus[0] == nil {
|
||||
return "Unknown"
|
||||
}
|
||||
// TODO: State transition are important, not just the latest.
|
||||
return hs.HealthStatus[0].HealthState
|
||||
}
|
||||
|
||||
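// applyLegacyHCToHC copies the settings of an existing legacy HttpHealthCheck onto the newer health check so they survive the migration.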
func applyLegacyHCToHC(existing *compute.HttpHealthCheck, hc *healthchecks.HealthCheck) {
|
||||
hc.Description = existing.Description
|
||||
hc.CheckIntervalSec = existing.CheckIntervalSec
|
||||
hc.HealthyThreshold = existing.HealthyThreshold
|
||||
hc.Host = existing.Host
|
||||
hc.Port = existing.Port
|
||||
hc.RequestPath = existing.RequestPath
|
||||
hc.TimeoutSec = existing.TimeoutSec
|
||||
hc.UnhealthyThreshold = existing.UnhealthyThreshold
|
||||
}
|
||||
|
||||
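// applyProbeSettingsToHC copies the readiness probe's httpGet path, host and timing settings onto the health check.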
func applyProbeSettingsToHC(p *v1.Probe, hc *healthchecks.HealthCheck) {
|
||||
healthPath := p.Handler.HTTPGet.Path
|
||||
// GCE requires a leading "/" for health check urls.
|
||||
if !strings.HasPrefix(healthPath, "/") {
|
||||
healthPath = "/" + healthPath
|
||||
}
|
||||
// Extract host from HTTP headers
|
||||
host := p.Handler.HTTPGet.Host
|
||||
for _, header := range p.Handler.HTTPGet.HTTPHeaders {
|
||||
if header.Name == "Host" {
|
||||
host = header.Value
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
hc.RequestPath = healthPath
|
||||
hc.Host = host
|
||||
hc.Description = "Kubernetes L7 health check generated with readiness probe settings."
|
||||
hc.CheckIntervalSec = int64(p.PeriodSeconds) + int64(healthchecks.DefaultHealthCheckInterval.Seconds())
|
||||
hc.TimeoutSec = int64(p.TimeoutSeconds)
|
||||
}
|
|
@ -1,505 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/ingress/controllers/gce/instances"
|
||||
"k8s.io/ingress/controllers/gce/storage"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
const defaultZone = "zone-a"
|
||||
|
||||
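// noOpErrFunc never injects an error; tests override errFunc when they need to simulate cloud failures.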
var noOpErrFunc = func(op int, be *compute.BackendService) error { return nil }
|
||||
|
||||
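// existingProbe is registered for port 443 in newTestJig so tests can verify that readiness probe settings are propagated to health checks.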
var existingProbe = &api_v1.Probe{
|
||||
Handler: api_v1.Handler{
|
||||
HTTPGet: &api_v1.HTTPGetAction{
|
||||
Scheme: api_v1.URISchemeHTTPS,
|
||||
Path: "/my-special-path",
|
||||
Port: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 443,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
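// newTestJig assembles a backend pool backed by fake cloud, instance group and health check providers for use in the tests below.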
func newTestJig(f BackendServices, fakeIGs instances.InstanceGroups, syncWithCloud bool) (*Backends, healthchecks.HealthCheckProvider) {
|
||||
namer := &utils.Namer{}
|
||||
nodePool := instances.NewNodePool(fakeIGs)
|
||||
nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}})
|
||||
healthCheckProvider := healthchecks.NewFakeHealthCheckProvider()
|
||||
healthChecks := healthchecks.NewHealthChecker(healthCheckProvider, "/", namer)
|
||||
bp := NewBackendPool(f, healthChecks, nodePool, namer, []int64{}, syncWithCloud)
|
||||
probes := map[ServicePort]*api_v1.Probe{{Port: 443, Protocol: utils.ProtocolHTTPS}: existingProbe}
|
||||
bp.Init(NewFakeProbeProvider(probes))
|
||||
|
||||
return bp, healthCheckProvider
|
||||
}
|
||||
|
||||
func TestBackendPoolAdd(t *testing.T) {
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, _ := newTestJig(f, fakeIGs, false)
|
||||
namer := utils.Namer{}
|
||||
|
||||
testCases := []ServicePort{
|
||||
{Port: 80, Protocol: utils.ProtocolHTTP},
|
||||
{Port: 443, Protocol: utils.ProtocolHTTPS},
|
||||
}
|
||||
|
||||
for _, nodePort := range testCases {
|
||||
// For simplicity, these tests use 80/443 as nodeports
|
||||
t.Run(fmt.Sprintf("Port:%v Protocol:%v", nodePort.Port, nodePort.Protocol), func(t *testing.T) {
|
||||
// Add a backend for a port, then re-add the same port and
|
||||
// make sure it corrects a broken link from the backend to
|
||||
// the instance group.
|
||||
err := pool.Ensure([]ServicePort{nodePort}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when adding a nodeport: %v, err: %v", nodePort, err)
|
||||
}
|
||||
beName := namer.BeName(nodePort.Port)
|
||||
|
||||
// Check that the new backend has the right port
|
||||
be, err := f.GetGlobalBackendService(beName)
|
||||
if err != nil {
|
||||
t.Fatalf("Did not find expected backend %v", beName)
|
||||
}
|
||||
if be.Port != nodePort.Port {
|
||||
t.Fatalf("Backend %v has wrong port %v, expected %v", be.Name, be.Port, nodePort)
|
||||
}
|
||||
|
||||
// Check that the instance group has the new port.
|
||||
ig, err := fakeIGs.GetInstanceGroup(namer.IGName(), defaultZone)
|
||||
var found bool
|
||||
for _, port := range ig.NamedPorts {
|
||||
if port.Port == nodePort.Port {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("Port %v not added to instance group", nodePort)
|
||||
}
|
||||
|
||||
// Check the created healthcheck is the correct protocol
|
||||
hc, err := pool.healthChecker.Get(nodePort.Port)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err when querying fake healthchecker: %v", err)
|
||||
}
|
||||
|
||||
if hc.Protocol() != nodePort.Protocol {
|
||||
t.Fatalf("Healthcheck scheme does not match nodeport scheme: hc:%v np:%v", hc.Protocol(), nodePort.Protocol)
|
||||
}
|
||||
|
||||
if nodePort.Port == 443 && hc.RequestPath != "/my-special-path" {
|
||||
t.Fatalf("Healthcheck for 443 should have special request path from probe")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthCheckMigration(t *testing.T) {
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, hcp := newTestJig(f, fakeIGs, false)
|
||||
namer := utils.Namer{}
|
||||
|
||||
p := ServicePort{Port: 7000, Protocol: utils.ProtocolHTTP}
|
||||
|
||||
// Create a legacy health check and insert it into the HC provider.
|
||||
legacyHC := &compute.HttpHealthCheck{
|
||||
Name: namer.BeName(p.Port),
|
||||
RequestPath: "/my-healthz-path",
|
||||
Host: "k8s.io",
|
||||
Description: "My custom HC",
|
||||
UnhealthyThreshold: 30,
|
||||
CheckIntervalSec: 40,
|
||||
}
|
||||
hcp.CreateHttpHealthCheck(legacyHC)
|
||||
|
||||
// Add the service port to the backend pool
|
||||
pool.Ensure([]ServicePort{p}, nil)
|
||||
|
||||
// Assert the proper health check was created
|
||||
hc, _ := pool.healthChecker.Get(p.Port)
|
||||
if hc == nil || hc.Protocol() != p.Protocol {
|
||||
t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc)
|
||||
}
|
||||
|
||||
// Assert the newer health check has the legacy health check settings
|
||||
if hc.RequestPath != legacyHC.RequestPath ||
|
||||
hc.Host != legacyHC.Host ||
|
||||
hc.UnhealthyThreshold != legacyHC.UnhealthyThreshold ||
|
||||
hc.CheckIntervalSec != legacyHC.CheckIntervalSec ||
|
||||
hc.Description != legacyHC.Description {
|
||||
t.Fatalf("Expected newer health check to have identical settings to legacy health check. Legacy: %+v, New: %+v", legacyHC, hc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendPoolUpdate(t *testing.T) {
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, _ := newTestJig(f, fakeIGs, false)
|
||||
namer := utils.Namer{}
|
||||
|
||||
p := ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
|
||||
pool.Ensure([]ServicePort{p}, nil)
|
||||
beName := namer.BeName(p.Port)
|
||||
|
||||
be, err := f.GetGlobalBackendService(beName)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err: %v", err)
|
||||
}
|
||||
|
||||
if utils.AppProtocol(be.Protocol) != p.Protocol {
|
||||
t.Fatalf("Expected scheme %v but got %v", p.Protocol, be.Protocol)
|
||||
}
|
||||
|
||||
// Assert the proper health check was created
|
||||
hc, _ := pool.healthChecker.Get(p.Port)
|
||||
if hc == nil || hc.Protocol() != p.Protocol {
|
||||
t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc)
|
||||
}
|
||||
|
||||
// Update service port to encrypted
|
||||
p.Protocol = utils.ProtocolHTTPS
|
||||
pool.Ensure([]ServicePort{p}, nil)
|
||||
|
||||
be, err = f.GetGlobalBackendService(beName)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err retrieving backend service after update: %v", err)
|
||||
}
|
||||
|
||||
// Assert the backend has the correct protocol
|
||||
if utils.AppProtocol(be.Protocol) != p.Protocol {
|
||||
t.Fatalf("Expected scheme %v but got %v", p.Protocol, utils.AppProtocol(be.Protocol))
|
||||
}
|
||||
|
||||
// Assert the proper health check was created
|
||||
hc, _ = pool.healthChecker.Get(p.Port)
|
||||
if hc == nil || hc.Protocol() != p.Protocol {
|
||||
t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendPoolChaosMonkey(t *testing.T) {
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, _ := newTestJig(f, fakeIGs, false)
|
||||
namer := utils.Namer{}
|
||||
|
||||
nodePort := ServicePort{Port: 8080, Protocol: utils.ProtocolHTTP}
|
||||
pool.Ensure([]ServicePort{nodePort}, nil)
|
||||
beName := namer.BeName(nodePort.Port)
|
||||
|
||||
be, _ := f.GetGlobalBackendService(beName)
|
||||
|
||||
// Mess up the link between backend service and instance group.
|
||||
// This simulates a user doing foolish things through the UI.
|
||||
be.Backends = []*compute.Backend{
|
||||
{Group: "test edge hop"},
|
||||
}
|
||||
f.calls = []int{}
|
||||
f.UpdateGlobalBackendService(be)
|
||||
|
||||
pool.Ensure([]ServicePort{nodePort}, nil)
|
||||
for _, call := range f.calls {
|
||||
if call == utils.Create {
|
||||
t.Fatalf("Unexpected create for existing backend service")
|
||||
}
|
||||
}
|
||||
gotBackend, err := f.GetGlobalBackendService(beName)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to find a backend with name %v: %v", beName, err)
|
||||
}
|
||||
gotGroup, err := fakeIGs.GetInstanceGroup(namer.IGName(), defaultZone)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to find instance group %v", namer.IGName())
|
||||
}
|
||||
backendLinks := sets.NewString()
|
||||
for _, be := range gotBackend.Backends {
|
||||
backendLinks.Insert(be.Group)
|
||||
}
|
||||
if !backendLinks.Has(gotGroup.SelfLink) {
|
||||
t.Fatalf(
|
||||
"Broken instance group link, got: %+v expected: %v",
|
||||
backendLinks.List(),
|
||||
gotGroup.SelfLink)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendPoolSync(t *testing.T) {
|
||||
// Call sync on a backend pool with a list of ports, make sure the pool
|
||||
// creates/deletes required ports.
|
||||
svcNodePorts := []ServicePort{{Port: 81, Protocol: utils.ProtocolHTTP}, {Port: 82, Protocol: utils.ProtocolHTTPS}, {Port: 83, Protocol: utils.ProtocolHTTP}}
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, _ := newTestJig(f, fakeIGs, true)
|
||||
pool.Ensure([]ServicePort{ServicePort{Port: 81}}, nil)
|
||||
pool.Ensure([]ServicePort{ServicePort{Port: 90}}, nil)
|
||||
if err := pool.Ensure(svcNodePorts, nil); err != nil {
|
||||
t.Errorf("Expected backend pool to add node ports, err: %v", err)
|
||||
}
|
||||
if err := pool.GC(svcNodePorts); err != nil {
|
||||
t.Errorf("Expected backend pool to GC, err: %v", err)
|
||||
}
|
||||
if _, err := pool.Get(90); err == nil {
|
||||
t.Fatalf("Did not expect to find port 90")
|
||||
}
|
||||
for _, port := range svcNodePorts {
|
||||
if _, err := pool.Get(port.Port); err != nil {
|
||||
t.Fatalf("Expected to find port %v", port)
|
||||
}
|
||||
}
|
||||
|
||||
svcNodePorts = []ServicePort{{Port: 81}}
|
||||
deletedPorts := []ServicePort{{Port: 82}, {Port: 83}}
|
||||
if err := pool.GC(svcNodePorts); err != nil {
|
||||
t.Fatalf("Expected backend pool to GC, err: %v", err)
|
||||
}
|
||||
|
||||
for _, port := range deletedPorts {
|
||||
if _, err := pool.Get(port.Port); err == nil {
|
||||
t.Fatalf("Pool contains %v after deletion", port)
|
||||
}
|
||||
}
|
||||
|
||||
// All these backends should be ignored because they don't belong to the cluster.
|
||||
// foo - non k8s managed backend
|
||||
// k8s-be-foo - foo is not a nodeport
|
||||
// k8s--bar--foo - too many cluster delimiters
|
||||
// k8s-be-30001--uid - another cluster tagged with uid
|
||||
unrelatedBackends := sets.NewString([]string{"foo", "k8s-be-foo", "k8s--bar--foo", "k8s-be-30001--uid"}...)
|
||||
for _, name := range unrelatedBackends.List() {
|
||||
f.CreateGlobalBackendService(&compute.BackendService{Name: name})
|
||||
}
|
||||
|
||||
namer := &utils.Namer{}
|
||||
// This backend should get deleted again since it is managed by this cluster.
|
||||
f.CreateGlobalBackendService(&compute.BackendService{Name: namer.BeName(deletedPorts[0].Port)})
|
||||
|
||||
// TODO: Avoid casting.
|
||||
// Repopulate the pool with a cloud list, which now includes the 82 port
|
||||
// backend. This would happen if, say, an ingress backend is removed
|
||||
// while the controller is restarting.
|
||||
pool.snapshotter.(*storage.CloudListingPool).ReplenishPool()
|
||||
|
||||
pool.GC(svcNodePorts)
|
||||
|
||||
currBackends, _ := f.ListGlobalBackendServices()
|
||||
currSet := sets.NewString()
|
||||
for _, b := range currBackends.Items {
|
||||
currSet.Insert(b.Name)
|
||||
}
|
||||
// Port 81 still exists because it's an in-use service NodePort.
|
||||
knownBe := namer.BeName(81)
|
||||
if !currSet.Has(knownBe) {
|
||||
t.Fatalf("Expected %v to exist in backend pool", knownBe)
|
||||
}
|
||||
currSet.Delete(knownBe)
|
||||
if !currSet.Equal(unrelatedBackends) {
|
||||
t.Fatalf("Some unrelated backends were deleted. Expected %+v, got %+v", unrelatedBackends, currSet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
|
||||
namer := &utils.Namer{}
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
nodePool := instances.NewNodePool(fakeIGs)
|
||||
nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}})
|
||||
hcp := healthchecks.NewFakeHealthCheckProvider()
|
||||
healthChecks := healthchecks.NewHealthChecker(hcp, "/", namer)
|
||||
bp := NewBackendPool(f, healthChecks, nodePool, namer, []int64{}, false)
|
||||
probes := map[ServicePort]*api_v1.Probe{}
|
||||
bp.Init(NewFakeProbeProvider(probes))
|
||||
|
||||
// Create a legacy HTTP health check
|
||||
beName := namer.BeName(80)
|
||||
if err := hcp.CreateHttpHealthCheck(&compute.HttpHealthCheck{
|
||||
Name: beName,
|
||||
Port: 80,
|
||||
}); err != nil {
|
||||
t.Fatalf("unexpected error creating http health check %v", err)
|
||||
}
|
||||
|
||||
// Verify health check exists
|
||||
hc, err := hcp.GetHttpHealthCheck(beName)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting http health check %v", err)
|
||||
}
|
||||
|
||||
// Create backend service with expected name and link to legacy health check
|
||||
f.CreateGlobalBackendService(&compute.BackendService{
|
||||
Name: beName,
|
||||
HealthChecks: []string{hc.SelfLink},
|
||||
})
|
||||
|
||||
// Have pool sync the above backend service
|
||||
bp.Ensure([]ServicePort{ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}}, nil)
|
||||
|
||||
// Verify the legacy health check has been deleted
|
||||
_, err = hcp.GetHttpHealthCheck(beName)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error getting http health check %v", err)
|
||||
}
|
||||
|
||||
// Verify a newer health check exists
|
||||
hcNew, err := hcp.GetHealthCheck(beName)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting http health check %v", err)
|
||||
}
|
||||
|
||||
// Verify the newer health check is of type HTTPS
|
||||
if hcNew.Type != string(utils.ProtocolHTTPS) {
|
||||
t.Fatalf("expected health check type to be %v, actual %v", string(utils.ProtocolHTTPS), hcNew.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendPoolShutdown(t *testing.T) {
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, _ := newTestJig(f, fakeIGs, false)
|
||||
namer := utils.Namer{}
|
||||
|
||||
// Add a backend-service and verify that it doesn't exist after Shutdown()
|
||||
pool.Ensure([]ServicePort{ServicePort{Port: 80}}, nil)
|
||||
pool.Shutdown()
|
||||
if _, err := f.GetGlobalBackendService(namer.BeName(80)); err == nil {
|
||||
t.Fatalf("Expected backend service %v to be deleted after Shutdown", namer.BeName(80))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendInstanceGroupClobbering(t *testing.T) {
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, _ := newTestJig(f, fakeIGs, false)
|
||||
namer := utils.Namer{}
|
||||
|
||||
// This will add the instance group k8s-ig to the instance pool
|
||||
pool.Ensure([]ServicePort{ServicePort{Port: 80}}, nil)
|
||||
|
||||
be, err := f.GetGlobalBackendService(namer.BeName(80))
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
// Simulate another controller updating the same backend service with
|
||||
// a different instance group
|
||||
newGroups := []*compute.Backend{
|
||||
{Group: "k8s-ig-bar"},
|
||||
{Group: "k8s-ig-foo"},
|
||||
}
|
||||
be.Backends = append(be.Backends, newGroups...)
|
||||
if err = f.UpdateGlobalBackendService(be); err != nil {
|
||||
t.Fatalf("Failed to update backend service %v", be.Name)
|
||||
}
|
||||
|
||||
// Make sure repeated adds don't clobber the inserted instance group
|
||||
pool.Ensure([]ServicePort{ServicePort{Port: 80}}, nil)
|
||||
be, err = f.GetGlobalBackendService(namer.BeName(80))
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
gotGroups := sets.NewString()
|
||||
for _, g := range be.Backends {
|
||||
gotGroups.Insert(g.Group)
|
||||
}
|
||||
|
||||
// seed expectedGroups with the first group native to this controller
|
||||
expectedGroups := sets.NewString("k8s-ig")
|
||||
for _, newGroup := range newGroups {
|
||||
expectedGroups.Insert(newGroup.Group)
|
||||
}
|
||||
if !expectedGroups.Equal(gotGroups) {
|
||||
t.Fatalf("Expected %v Got %v", expectedGroups, gotGroups)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendCreateBalancingMode(t *testing.T) {
|
||||
f := NewFakeBackendServices(noOpErrFunc)
|
||||
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool, _ := newTestJig(f, fakeIGs, false)
|
||||
namer := utils.Namer{}
|
||||
nodePort := ServicePort{Port: 8080}
|
||||
modes := []BalancingMode{Rate, Utilization}
|
||||
|
||||
// block the creation of Backends with the given balancingMode
|
||||
// and verify that a backend with the other balancingMode is
|
||||
// created
|
||||
for i, bm := range modes {
|
||||
f.errFunc = func(op int, be *compute.BackendService) error {
|
||||
for _, b := range be.Backends {
|
||||
if b.BalancingMode == string(bm) {
|
||||
return &googleapi.Error{Code: http.StatusBadRequest}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
pool.Ensure([]ServicePort{nodePort}, nil)
|
||||
be, err := f.GetGlobalBackendService(namer.BeName(nodePort.Port))
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
for _, b := range be.Backends {
|
||||
if b.BalancingMode != string(modes[(i+1)%len(modes)]) {
|
||||
t.Fatalf("Wrong balancing mode, expected %v got %v", modes[(i+1)%len(modes)], b.BalancingMode)
|
||||
}
|
||||
}
|
||||
pool.GC([]ServicePort{})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyProbeSettingsToHC(t *testing.T) {
|
||||
p := "healthz"
|
||||
hc := healthchecks.DefaultHealthCheck(8080, utils.ProtocolHTTPS)
|
||||
probe := &api_v1.Probe{
|
||||
Handler: api_v1.Handler{
|
||||
HTTPGet: &api_v1.HTTPGetAction{
|
||||
Scheme: api_v1.URISchemeHTTP,
|
||||
Path: p,
|
||||
Port: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
applyProbeSettingsToHC(probe, hc)
|
||||
|
||||
if hc.Protocol() != utils.ProtocolHTTPS || hc.Port != 8080 {
|
||||
t.Errorf("Basic HC settings changed")
|
||||
}
|
||||
if hc.RequestPath != "/"+p {
|
||||
t.Errorf("Failed to apply probe's requestpath")
|
||||
}
|
||||
}
|
|
@ -1,144 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// NewFakeBackendServices creates a new fake backend services manager.
|
||||
func NewFakeBackendServices(ef func(op int, be *compute.BackendService) error) *FakeBackendServices {
|
||||
return &FakeBackendServices{
|
||||
errFunc: ef,
|
||||
backendServices: cache.NewStore(func(obj interface{}) (string, error) {
|
||||
svc := obj.(*compute.BackendService)
|
||||
return svc.Name, nil
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// FakeBackendServices fakes out GCE backend services.
|
||||
type FakeBackendServices struct {
|
||||
backendServices cache.Store
|
||||
calls []int
|
||||
errFunc func(op int, be *compute.BackendService) error
|
||||
}
|
||||
|
||||
// GetGlobalBackendService fakes getting a backend service from the cloud.
|
||||
func (f *FakeBackendServices) GetGlobalBackendService(name string) (*compute.BackendService, error) {
|
||||
f.calls = append(f.calls, utils.Get)
|
||||
obj, exists, err := f.backendServices.GetByKey(name)
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("backend service %v not found", name)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
svc := obj.(*compute.BackendService)
|
||||
if name == svc.Name {
|
||||
return svc, nil
|
||||
}
|
||||
return nil, fmt.Errorf("backend service %v not found", name)
|
||||
}
|
||||
|
||||
// CreateGlobalBackendService fakes backend service creation.
|
||||
func (f *FakeBackendServices) CreateGlobalBackendService(be *compute.BackendService) error {
|
||||
if f.errFunc != nil {
|
||||
if err := f.errFunc(utils.Create, be); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
f.calls = append(f.calls, utils.Create)
|
||||
be.SelfLink = be.Name
|
||||
return f.backendServices.Update(be)
|
||||
}
|
||||
|
||||
// DeleteGlobalBackendService fakes backend service deletion.
|
||||
func (f *FakeBackendServices) DeleteGlobalBackendService(name string) error {
|
||||
f.calls = append(f.calls, utils.Delete)
|
||||
svc, exists, err := f.backendServices.GetByKey(name)
|
||||
if !exists {
|
||||
return fmt.Errorf("backend service %v not found", name)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.backendServices.Delete(svc)
|
||||
}
|
||||
|
||||
// ListGlobalBackendServices fakes backend service listing.
|
||||
func (f *FakeBackendServices) ListGlobalBackendServices() (*compute.BackendServiceList, error) {
|
||||
var svcs []*compute.BackendService
|
||||
for _, s := range f.backendServices.List() {
|
||||
svc := s.(*compute.BackendService)
|
||||
svcs = append(svcs, svc)
|
||||
}
|
||||
return &compute.BackendServiceList{Items: svcs}, nil
|
||||
}
|
||||
|
||||
// UpdateGlobalBackendService fakes updating a backend service.
|
||||
func (f *FakeBackendServices) UpdateGlobalBackendService(be *compute.BackendService) error {
|
||||
if f.errFunc != nil {
|
||||
if err := f.errFunc(utils.Update, be); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
f.calls = append(f.calls, utils.Update)
|
||||
return f.backendServices.Update(be)
|
||||
}
|
||||
|
||||
// GetGlobalBackendServiceHealth fakes getting backend service health.
|
||||
func (f *FakeBackendServices) GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
|
||||
be, err := f.GetGlobalBackendService(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
states := []*compute.HealthStatus{
|
||||
{
|
||||
HealthState: "HEALTHY",
|
||||
IpAddress: "",
|
||||
Port: be.Port,
|
||||
},
|
||||
}
|
||||
return &compute.BackendServiceGroupHealth{
|
||||
HealthStatus: states}, nil
|
||||
}
|
||||
|
||||
// FakeProbeProvider implements the probeProvider interface for tests.
|
||||
type FakeProbeProvider struct {
|
||||
probes map[ServicePort]*api_v1.Probe
|
||||
}
|
||||
|
||||
// NewFakeProbeProvider returns a struct which satisfies the probeProvider interface
|
||||
func NewFakeProbeProvider(probes map[ServicePort]*api_v1.Probe) *FakeProbeProvider {
|
||||
return &FakeProbeProvider{probes}
|
||||
}
|
||||
|
||||
// GetProbe returns the probe for a given nodePort
|
||||
func (pp *FakeProbeProvider) GetProbe(port ServicePort) (*api_v1.Probe, error) {
|
||||
if probe, exists := pp.probes[port]; exists && probe.HTTPGet != nil {
|
||||
return probe, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// probeProvider retrieves a probe struct given a nodePort
|
||||
type probeProvider interface {
|
||||
GetProbe(sp ServicePort) (*api_v1.Probe, error)
|
||||
}
|
||||
|
||||
// BackendPool is an interface to manage a pool of kubernetes nodePort services
|
||||
// as gce backendServices, and sync them through the BackendServices interface.
|
||||
type BackendPool interface {
|
||||
Init(p probeProvider)
|
||||
Ensure(ports []ServicePort, igs []*compute.InstanceGroup) error
|
||||
Get(port int64) (*compute.BackendService, error)
|
||||
Delete(port int64) error
|
||||
GC(ports []ServicePort) error
|
||||
Shutdown() error
|
||||
Status(name string) string
|
||||
List() ([]interface{}, error)
|
||||
}
|
||||
|
||||
// BackendServices is an interface for managing gce backend services.
|
||||
type BackendServices interface {
|
||||
GetGlobalBackendService(name string) (*compute.BackendService, error)
|
||||
UpdateGlobalBackendService(bg *compute.BackendService) error
|
||||
CreateGlobalBackendService(bg *compute.BackendService) error
|
||||
DeleteGlobalBackendService(name string) error
|
||||
ListGlobalBackendServices() (*compute.BackendServiceList, error)
|
||||
GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error)
|
||||
}
|
|
@ -1,252 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/backends"
|
||||
"k8s.io/ingress/controllers/gce/firewalls"
|
||||
"k8s.io/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/ingress/controllers/gce/instances"
|
||||
"k8s.io/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultPort = 80
|
||||
defaultHealthCheckPath = "/"
|
||||
|
||||
// A backend is created per nodePort, tagged with the nodeport.
|
||||
// This allows sharing of backends across loadbalancers.
|
||||
backendPrefix = "k8s-be"
|
||||
|
||||
// A single target proxy/urlmap/forwarding rule is created per loadbalancer.
|
||||
// Tagged with the namespace/name of the Ingress.
|
||||
targetProxyPrefix = "k8s-tp"
|
||||
forwardingRulePrefix = "k8s-fw"
|
||||
urlMapPrefix = "k8s-um"
|
||||
|
||||
// Used in the test RunServer method to denote a delete request.
|
||||
deleteType = "del"
|
||||
|
||||
// port 0 is used as a signal for port not found/no such port etc.
|
||||
invalidPort = 0
|
||||
|
||||
// Names longer than this are truncated, because of GCE restrictions.
|
||||
nameLenLimit = 62
|
||||
)
|
||||
|
||||
// ClusterManager manages cluster resource pools.
|
||||
type ClusterManager struct {
|
||||
ClusterNamer *utils.Namer
|
||||
defaultBackendNodePort backends.ServicePort
|
||||
instancePool instances.NodePool
|
||||
backendPool backends.BackendPool
|
||||
l7Pool loadbalancers.LoadBalancerPool
|
||||
firewallPool firewalls.SingleFirewallPool
|
||||
|
||||
// TODO: Refactor so we simply init a health check pool.
|
||||
// Currently health checks are tied to backends because each backend needs
|
||||
// the link of the associated health, but both the backend pool and
|
||||
// loadbalancer pool manage backends, because the lifetime of the default
|
||||
// backend is tied to the last/first loadbalancer not the life of the
|
||||
// nodeport service or Ingress.
|
||||
healthCheckers []healthchecks.HealthChecker
|
||||
}
|
||||
|
||||
// Init initializes the cluster manager.
|
||||
func (c *ClusterManager) Init(tr *GCETranslator) {
|
||||
c.instancePool.Init(tr)
|
||||
c.backendPool.Init(tr)
|
||||
// TODO: Initialize other members as needed.
|
||||
}
|
||||
|
||||
// IsHealthy returns an error if the cluster manager is unhealthy.
|
||||
func (c *ClusterManager) IsHealthy() (err error) {
|
||||
// TODO: Expand on this, for now we just want to detect when the GCE client
|
||||
// is broken.
|
||||
_, err = c.backendPool.List()
|
||||
|
||||
// If this container is scheduled on a node without compute/rw it is
|
||||
// effectively useless, but it is healthy. Reporting it as unhealthy
|
||||
// will lead to container crashlooping.
|
||||
if utils.IsHTTPErrorCode(err, http.StatusForbidden) {
|
||||
glog.Infof("Reporting cluster as healthy, but unable to list backends: %v", err)
|
||||
return nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *ClusterManager) shutdown() error {
|
||||
if err := c.l7Pool.Shutdown(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.firewallPool.Shutdown(); err != nil {
|
||||
return err
|
||||
}
|
||||
// The backend pool will also delete instance groups.
|
||||
return c.backendPool.Shutdown()
|
||||
}
|
||||
|
||||
// Checkpoint performs a checkpoint with the cloud.
|
||||
// - lbs are the single cluster L7 loadbalancers we wish to exist. If they already
|
||||
// exist, they should not have any broken links between say, a UrlMap and
|
||||
// TargetHttpProxy.
|
||||
// - nodeNames are the names of nodes we wish to add to all loadbalancer
|
||||
// instance groups.
|
||||
// - backendServicePorts are the ports for which we require BackendServices.
|
||||
// - namedPorts are the ports which must be opened on instance groups.
|
||||
// Returns the list of all instance groups corresponding to the given loadbalancers.
|
||||
// If in performing the checkpoint the cluster manager runs out of quota, a
|
||||
// googleapi 403 is returned.
|
||||
func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, backendServicePorts []backends.ServicePort, namedPorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
|
||||
if len(namedPorts) != 0 {
|
||||
// Add the default backend node port to the list of named ports for instance groups.
|
||||
namedPorts = append(namedPorts, c.defaultBackendNodePort)
|
||||
}
|
||||
// Multiple ingress paths can point to the same service (and hence nodePort)
|
||||
// but each nodePort can only have one set of cloud resources behind it. So
|
||||
// don't waste time double validating GCE BackendServices.
|
||||
namedPorts = uniq(namedPorts)
|
||||
backendServicePorts = uniq(backendServicePorts)
|
||||
// Create Instance Groups.
|
||||
igs, err := c.EnsureInstanceGroupsAndPorts(namedPorts)
|
||||
if err != nil {
|
||||
return igs, err
|
||||
}
|
||||
if err := c.backendPool.Ensure(backendServicePorts, igs); err != nil {
|
||||
return igs, err
|
||||
}
|
||||
if err := c.instancePool.Sync(nodeNames); err != nil {
|
||||
return igs, err
|
||||
}
|
||||
if err := c.l7Pool.Sync(lbs); err != nil {
|
||||
return igs, err
|
||||
}
|
||||
|
||||
// TODO: Manage default backend and its firewall rule in a centralized way.
|
||||
// DefaultBackend is managed in l7 pool, which doesn't understand instances,
|
||||
// which the firewall rule requires.
|
||||
fwNodePorts := backendServicePorts
|
||||
if len(lbs) != 0 {
|
||||
// If there are no Ingresses, we shouldn't be allowing traffic to the
|
||||
// default backend. Equally importantly if the cluster gets torn down
|
||||
// we shouldn't leak the firewall rule.
|
||||
fwNodePorts = append(fwNodePorts, c.defaultBackendNodePort)
|
||||
}
|
||||
|
||||
var np []int64
|
||||
for _, p := range fwNodePorts {
|
||||
np = append(np, p.Port)
|
||||
}
|
||||
if err := c.firewallPool.Sync(np, nodeNames); err != nil {
|
||||
return igs, err
|
||||
}
|
||||
|
||||
return igs, nil
|
||||
}
|
||||
|
||||
func (c *ClusterManager) EnsureInstanceGroupsAndPorts(servicePorts []backends.ServicePort) ([]*compute.InstanceGroup, error) {
|
||||
ports := []int64{}
|
||||
for _, p := range servicePorts {
|
||||
ports = append(ports, p.Port)
|
||||
}
|
||||
igs, _, err := instances.EnsureInstanceGroupsAndPorts(c.instancePool, c.ClusterNamer, ports)
|
||||
return igs, err
|
||||
}
|
||||
|
||||
// GC garbage collects unused resources.
|
||||
// - lbNames are the names of L7 loadbalancers we wish to exist. Those not in
|
||||
// this list are removed from the cloud.
|
||||
// - nodePorts are the ports for which we want BackendServices. BackendServices
|
||||
// for ports not in this list are deleted.
|
||||
// This method ignores googleapi 404 errors (StatusNotFound).
|
||||
func (c *ClusterManager) GC(lbNames []string, nodePorts []backends.ServicePort) error {
|
||||
|
||||
// On GC:
|
||||
// * Loadbalancers need to get deleted before backends.
|
||||
// * Backends are refcounted in a shared pool.
|
||||
// * We always want to GC backends even if there was an error in GCing
|
||||
// loadbalancers, because the next Sync could rely on the GC for quota.
|
||||
// * There are at least 2 cases for backend GC:
|
||||
// 1. The loadbalancer has been deleted.
|
||||
// 2. An update to the url map drops the refcount of a backend. This can
|
||||
// happen when an Ingress is updated, if we don't GC after the update
|
||||
// we'll leak the backend.
|
||||
|
||||
lbErr := c.l7Pool.GC(lbNames)
|
||||
beErr := c.backendPool.GC(nodePorts)
|
||||
if lbErr != nil {
|
||||
return lbErr
|
||||
}
|
||||
if beErr != nil {
|
||||
return beErr
|
||||
}
|
||||
|
||||
// TODO(ingress#120): Move this to the backend pool so it mirrors creation
|
||||
var igErr error
|
||||
if len(lbNames) == 0 {
|
||||
igName := c.ClusterNamer.IGName()
|
||||
glog.Infof("Deleting instance group %v", igName)
|
||||
igErr = c.instancePool.DeleteInstanceGroup(igName)
|
||||
}
|
||||
if igErr != nil {
|
||||
return igErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewClusterManager creates a cluster manager for shared resources.
|
||||
// - namer: is the namer used to tag cluster wide shared resources.
|
||||
// - defaultBackendNodePort: is the node port of glbc's default backend. This is
|
||||
// the Kubernetes Service that serves the 404 page if no URLs match.
|
||||
// - defaultHealthCheckPath: is the default path used for L7 health checks, eg: "/healthz".
|
||||
func NewClusterManager(
|
||||
cloud *gce.GCECloud,
|
||||
namer *utils.Namer,
|
||||
defaultBackendNodePort backends.ServicePort,
|
||||
defaultHealthCheckPath string) (*ClusterManager, error) {
|
||||
|
||||
// Names are fundamental to the cluster, the uid allocator makes sure names don't collide.
|
||||
cluster := ClusterManager{ClusterNamer: namer}
|
||||
|
||||
// NodePool stores GCE vms that are in this Kubernetes cluster.
|
||||
cluster.instancePool = instances.NewNodePool(cloud)
|
||||
|
||||
// BackendPool creates GCE BackendServices and associated health checks.
|
||||
healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
|
||||
// Loadbalancer pool manages the default backend and its health check.
|
||||
defaultBackendHealthChecker := healthchecks.NewHealthChecker(cloud, "/healthz", cluster.ClusterNamer)
|
||||
|
||||
cluster.healthCheckers = []healthchecks.HealthChecker{healthChecker, defaultBackendHealthChecker}
|
||||
|
||||
// TODO: This needs to change to a consolidated management of the default backend.
|
||||
cluster.backendPool = backends.NewBackendPool(cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{defaultBackendNodePort.Port}, true)
|
||||
defaultBackendPool := backends.NewBackendPool(cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{}, false)
|
||||
cluster.defaultBackendNodePort = defaultBackendNodePort
|
||||
|
||||
// L7 pool creates targetHTTPProxy, ForwardingRules, UrlMaps, StaticIPs.
|
||||
cluster.l7Pool = loadbalancers.NewLoadBalancerPool(cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)
|
||||
cluster.firewallPool = firewalls.NewFirewallPool(cloud, cluster.ClusterNamer)
|
||||
return &cluster, nil
|
||||
}
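
// The sketch below was added for illustration and is not part of the original
// source. It shows one plausible way NewClusterManager might be called by the
// binary that wires the controller together; the cloud handle, the uid/firewall
// names and the default-backend node port (3000) are assumptions, not values
// mandated by this package.
func exampleNewClusterManager(cloud *gce.GCECloud) (*ClusterManager, error) {
	// The namer tags every shared GCE resource with the cluster uid.
	namer := utils.NewNamer(DefaultClusterUID, DefaultFirewallName)
	// Node port of the Service backing the 404 default backend (assumed value).
	defaultBackend := backends.ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
	return NewClusterManager(cloud, namer, defaultBackend, "/healthz")
}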
|
|
@@ -1,498 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
informerv1 "k8s.io/client-go/informers/core/v1"
|
||||
informerv1beta1 "k8s.io/client-go/informers/extensions/v1beta1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
scheme "k8s.io/client-go/kubernetes/scheme"
|
||||
unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
listers "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/loadbalancers"
|
||||
)
|
||||
|
||||
var (
|
||||
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
|
||||
|
||||
// DefaultClusterUID is the uid to use for cluster resources created by an
|
||||
// L7 controller that was started without the --cluster-uid flag.
|
||||
DefaultClusterUID = ""
|
||||
|
||||
// DefaultFirewallName is the name to use for firewall rules created
|
||||
// by an L7 controller when the --firewall-rule flag is not used.
|
||||
DefaultFirewallName = ""
|
||||
|
||||
// Frequency to poll on local stores to sync.
|
||||
storeSyncPollPeriod = 5 * time.Second
|
||||
)
|
||||
|
||||
// ControllerContext holds the shared informers and the stop channel used by the controllers.
|
||||
type ControllerContext struct {
|
||||
IngressInformer cache.SharedIndexInformer
|
||||
ServiceInformer cache.SharedIndexInformer
|
||||
PodInformer cache.SharedIndexInformer
|
||||
NodeInformer cache.SharedIndexInformer
|
||||
// Stop is the stop channel shared among controllers
|
||||
StopCh chan struct{}
|
||||
}
|
||||
|
||||
func NewControllerContext(kubeClient kubernetes.Interface, namespace string, resyncPeriod time.Duration) *ControllerContext {
|
||||
return &ControllerContext{
|
||||
IngressInformer: informerv1beta1.NewIngressInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
|
||||
ServiceInformer: informerv1.NewServiceInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
|
||||
PodInformer: informerv1.NewPodInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
|
||||
NodeInformer: informerv1.NewNodeInformer(kubeClient, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}),
|
||||
StopCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *ControllerContext) Start() {
|
||||
go ctx.IngressInformer.Run(ctx.StopCh)
|
||||
go ctx.ServiceInformer.Run(ctx.StopCh)
|
||||
go ctx.PodInformer.Run(ctx.StopCh)
|
||||
go ctx.NodeInformer.Run(ctx.StopCh)
|
||||
}
|
||||
|
||||
// LoadBalancerController watches the kubernetes api and adds/removes services
|
||||
// from the loadbalancer, via loadBalancerConfig.
|
||||
type LoadBalancerController struct {
|
||||
client kubernetes.Interface
|
||||
|
||||
ingressSynced cache.InformerSynced
|
||||
serviceSynced cache.InformerSynced
|
||||
podSynced cache.InformerSynced
|
||||
nodeSynced cache.InformerSynced
|
||||
ingLister StoreToIngressLister
|
||||
nodeLister StoreToNodeLister
|
||||
svcLister StoreToServiceLister
|
||||
// Health checks are the readiness probes of containers on pods.
|
||||
podLister StoreToPodLister
|
||||
// TODO: Watch secrets
|
||||
CloudClusterManager *ClusterManager
|
||||
recorder record.EventRecorder
|
||||
nodeQueue *taskQueue
|
||||
ingQueue *taskQueue
|
||||
tr *GCETranslator
|
||||
stopCh chan struct{}
|
||||
// stopLock is used to enforce only a single call to Stop is active.
|
||||
// Needed because we allow stopping through an http endpoint and
|
||||
// allowing concurrent stoppers leads to stack traces.
|
||||
stopLock sync.Mutex
|
||||
shutdown bool
|
||||
// tlsLoader loads secrets from the Kubernetes apiserver for Ingresses.
|
||||
tlsLoader tlsLoader
|
||||
// hasSynced returns true if all associated sub-controllers have synced.
|
||||
// Abstracted into a func for testing.
|
||||
hasSynced func() bool
|
||||
}
|
||||
|
||||
// NewLoadBalancerController creates a controller for gce loadbalancers.
|
||||
// - kubeClient: A kubernetes REST client.
|
||||
// - clusterManager: A ClusterManager capable of creating all cloud resources
|
||||
// required for L7 loadbalancing.
|
||||
// - resyncPeriod: Watchers relist from the Kubernetes API server this often.
|
||||
func NewLoadBalancerController(kubeClient kubernetes.Interface, ctx *ControllerContext, clusterManager *ClusterManager) (*LoadBalancerController, error) {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
|
||||
Interface: kubeClient.Core().Events(""),
|
||||
})
|
||||
lbc := LoadBalancerController{
|
||||
client: kubeClient,
|
||||
CloudClusterManager: clusterManager,
|
||||
stopCh: ctx.StopCh,
|
||||
recorder: eventBroadcaster.NewRecorder(scheme.Scheme,
|
||||
apiv1.EventSource{Component: "loadbalancer-controller"}),
|
||||
}
|
||||
lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
|
||||
lbc.ingQueue = NewTaskQueue(lbc.sync)
|
||||
lbc.hasSynced = lbc.storesSynced
|
||||
|
||||
lbc.ingressSynced = ctx.IngressInformer.HasSynced
|
||||
lbc.serviceSynced = ctx.ServiceInformer.HasSynced
|
||||
lbc.podSynced = ctx.PodInformer.HasSynced
|
||||
lbc.nodeSynced = ctx.NodeInformer.HasSynced
|
||||
|
||||
lbc.ingLister.Store = ctx.IngressInformer.GetStore()
|
||||
lbc.svcLister.Indexer = ctx.ServiceInformer.GetIndexer()
|
||||
lbc.podLister.Indexer = ctx.PodInformer.GetIndexer()
|
||||
lbc.nodeLister.Indexer = ctx.NodeInformer.GetIndexer()
|
||||
// ingress event handler
|
||||
ctx.IngressInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
addIng := obj.(*extensions.Ingress)
|
||||
if !isGCEIngress(addIng) && !isGCEMultiClusterIngress(addIng) {
|
||||
glog.Infof("Ignoring add for ingress %v based on annotation %v", addIng.Name, ingressClassKey)
|
||||
return
|
||||
}
|
||||
lbc.recorder.Eventf(addIng, apiv1.EventTypeNormal, "ADD", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name))
|
||||
lbc.ingQueue.enqueue(obj)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
delIng := obj.(*extensions.Ingress)
|
||||
if !isGCEIngress(delIng) && !isGCEMultiClusterIngress(delIng) {
|
||||
glog.Infof("Ignoring delete for ingress %v based on annotation %v", delIng.Name, ingressClassKey)
|
||||
return
|
||||
}
|
||||
glog.Infof("Delete notification received for Ingress %v/%v", delIng.Namespace, delIng.Name)
|
||||
lbc.ingQueue.enqueue(obj)
|
||||
},
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
curIng := cur.(*extensions.Ingress)
|
||||
if !isGCEIngress(curIng) && !isGCEMultiClusterIngress(curIng) {
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(old, cur) {
|
||||
glog.V(3).Infof("Ingress %v changed, syncing", curIng.Name)
|
||||
}
|
||||
lbc.ingQueue.enqueue(cur)
|
||||
},
|
||||
})
|
||||
|
||||
// service event handler
|
||||
ctx.ServiceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: lbc.enqueueIngressForService,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
if !reflect.DeepEqual(old, cur) {
|
||||
lbc.enqueueIngressForService(cur)
|
||||
}
|
||||
},
|
||||
// Ingress deletes matter, service deletes don't.
|
||||
})
|
||||
|
||||
// node event handler
|
||||
ctx.NodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: lbc.nodeQueue.enqueue,
|
||||
DeleteFunc: lbc.nodeQueue.enqueue,
|
||||
// Nodes are updated every 10s and we don't care, so no update handler.
|
||||
})
|
||||
|
||||
lbc.tr = &GCETranslator{&lbc}
|
||||
lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client}
|
||||
glog.V(3).Infof("Created new loadbalancer controller")
|
||||
|
||||
return &lbc, nil
|
||||
}
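
// The function below is an illustrative sketch added in this review, not part
// of the original file. It shows how the pieces above are typically wired
// together once a ClusterManager exists; the namespace and the resync period
// are assumptions.
func exampleStartController(kubeClient kubernetes.Interface, cm *ClusterManager) (*LoadBalancerController, error) {
	ctx := NewControllerContext(kubeClient, apiv1.NamespaceAll, 30*time.Second)
	lbc, err := NewLoadBalancerController(kubeClient, ctx, cm)
	if err != nil {
		return nil, err
	}
	ctx.Start() // start the shared informers that feed the listers
	go lbc.Run() // Run blocks until Stop is called, so run it in a goroutine
	return lbc, nil
}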
|
||||
|
||||
// enqueueIngressForService enqueues all the Ingress' for a Service.
|
||||
func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) {
|
||||
svc := obj.(*apiv1.Service)
|
||||
ings, err := lbc.ingLister.GetServiceIngress(svc)
|
||||
if err != nil {
|
||||
glog.V(5).Infof("ignoring service %v: %v", svc.Name, err)
|
||||
return
|
||||
}
|
||||
for _, ing := range ings {
|
||||
if !isGCEIngress(&ing) {
|
||||
continue
|
||||
}
|
||||
lbc.ingQueue.enqueue(&ing)
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the loadbalancer controller.
|
||||
func (lbc *LoadBalancerController) Run() {
|
||||
glog.Infof("Starting loadbalancer controller")
|
||||
go lbc.ingQueue.run(time.Second, lbc.stopCh)
|
||||
go lbc.nodeQueue.run(time.Second, lbc.stopCh)
|
||||
<-lbc.stopCh
|
||||
glog.Infof("Shutting down Loadbalancer Controller")
|
||||
}
|
||||
|
||||
// Stop stops the loadbalancer controller. It also deletes cluster resources
|
||||
// if deleteAll is true.
|
||||
func (lbc *LoadBalancerController) Stop(deleteAll bool) error {
|
||||
// Stop is invoked from the http endpoint.
|
||||
lbc.stopLock.Lock()
|
||||
defer lbc.stopLock.Unlock()
|
||||
|
||||
// Only try draining the workqueue if we haven't already.
|
||||
if !lbc.shutdown {
|
||||
close(lbc.stopCh)
|
||||
glog.Infof("Shutting down controller queues.")
|
||||
lbc.ingQueue.shutdown()
|
||||
lbc.nodeQueue.shutdown()
|
||||
lbc.shutdown = true
|
||||
}
|
||||
|
||||
// Deleting shared cluster resources is idempotent.
|
||||
if deleteAll {
|
||||
glog.Infof("Shutting down cluster manager.")
|
||||
return lbc.CloudClusterManager.shutdown()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// storesSynced returns true if all the sub-controllers have finished their
|
||||
// first sync with apiserver.
|
||||
func (lbc *LoadBalancerController) storesSynced() bool {
|
||||
return (
|
||||
// wait for pods to sync so we don't allocate a default health check when
|
||||
// an endpoint has a readiness probe.
|
||||
lbc.podSynced() &&
|
||||
// wait for services so we don't thrash on backend creation.
|
||||
lbc.serviceSynced() &&
|
||||
// wait for nodes so we don't disconnect a backend from an instance
|
||||
// group just because we don't realize there are nodes in that zone.
|
||||
lbc.nodeSynced() &&
|
||||
// Wait for ingresses as a safety measure. We don't really need this.
|
||||
lbc.ingressSynced())
|
||||
}
|
||||
|
||||
// sync manages Ingress create/updates/deletes.
|
||||
func (lbc *LoadBalancerController) sync(key string) (err error) {
|
||||
if !lbc.hasSynced() {
|
||||
time.Sleep(storeSyncPollPeriod)
|
||||
return fmt.Errorf("waiting for stores to sync")
|
||||
}
|
||||
glog.V(3).Infof("Syncing %v", key)
|
||||
|
||||
allIngresses, err := lbc.ingLister.ListAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gceIngresses, err := lbc.ingLister.ListGCEIngresses()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allNodePorts := lbc.tr.toNodePorts(&allIngresses)
|
||||
gceNodePorts := lbc.tr.toNodePorts(&gceIngresses)
|
||||
lbNames := lbc.ingLister.Store.ListKeys()
|
||||
lbs, err := lbc.toRuntimeInfo(gceIngresses)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nodeNames, err := lbc.getReadyNodeNames()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obj, ingExists, err := lbc.ingLister.Store.GetByKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This performs a 2 phase checkpoint with the cloud:
|
||||
// * Phase 1 creates/verifies resources are as expected. At the end of a
|
||||
// successful checkpoint we know that existing L7s are WAI, and the L7
|
||||
// for the Ingress associated with "key" is ready for a UrlMap update.
|
||||
// If this encounters an error, eg for quota reasons, we want to invoke
|
||||
// Phase 2 right away and retry checkpointing.
|
||||
// * Phase 2 performs GC by refcounting shared resources. This needs to
|
||||
// happen periodically whether or not stage 1 fails. At the end of a
|
||||
// successful GC we know that there are no dangling cloud resources that
|
||||
// don't have an associated Kubernetes Ingress/Service/Endpoint.
|
||||
|
||||
var syncError error
|
||||
defer func() {
|
||||
if deferErr := lbc.CloudClusterManager.GC(lbNames, allNodePorts); deferErr != nil {
|
||||
err = fmt.Errorf("error during sync %v, error during GC %v", syncError, deferErr)
|
||||
}
|
||||
glog.V(3).Infof("Finished syncing %v", key)
|
||||
}()
|
||||
// Record any errors during sync and throw a single error at the end. This
|
||||
// allows us to free up associated cloud resources ASAP.
|
||||
igs, err := lbc.CloudClusterManager.Checkpoint(lbs, nodeNames, gceNodePorts, allNodePorts)
|
||||
if err != nil {
|
||||
// TODO: Implement proper backoff for the queue.
|
||||
eventMsg := "GCE"
|
||||
if ingExists {
|
||||
lbc.recorder.Eventf(obj.(*extensions.Ingress), apiv1.EventTypeWarning, eventMsg, err.Error())
|
||||
} else {
|
||||
err = fmt.Errorf("%v, error: %v", eventMsg, err)
|
||||
}
|
||||
syncError = err
|
||||
}
|
||||
|
||||
if !ingExists {
|
||||
return syncError
|
||||
}
|
||||
ing := *obj.(*extensions.Ingress)
|
||||
if isGCEMultiClusterIngress(&ing) {
|
||||
// Add instance group names as annotation on the ingress.
|
||||
if ing.Annotations == nil {
|
||||
ing.Annotations = map[string]string{}
|
||||
}
|
||||
err = setInstanceGroupsAnnotation(ing.Annotations, igs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := lbc.updateAnnotations(ing.Name, ing.Namespace, ing.Annotations); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update the UrlMap of the single loadbalancer that came through the watch.
|
||||
l7, err := lbc.CloudClusterManager.l7Pool.Get(key)
|
||||
if err != nil {
|
||||
syncError = fmt.Errorf("%v, unable to get loadbalancer: %v", syncError, err)
|
||||
return syncError
|
||||
}
|
||||
|
||||
if urlMap, err := lbc.tr.toURLMap(&ing); err != nil {
|
||||
syncError = fmt.Errorf("%v, convert to url map error %v", syncError, err)
|
||||
} else if err := l7.UpdateUrlMap(urlMap); err != nil {
|
||||
lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "UrlMap", err.Error())
|
||||
syncError = fmt.Errorf("%v, update url map error: %v", syncError, err)
|
||||
} else if err := lbc.updateIngressStatus(l7, ing); err != nil {
|
||||
lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "Status", err.Error())
|
||||
syncError = fmt.Errorf("%v, update ingress error: %v", syncError, err)
|
||||
}
|
||||
return syncError
|
||||
}
|
||||
|
||||
// updateIngressStatus updates the IP and annotations of a loadbalancer.
|
||||
// The annotations are parsed by kubectl describe.
|
||||
func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing extensions.Ingress) error {
|
||||
ingClient := lbc.client.Extensions().Ingresses(ing.Namespace)
|
||||
|
||||
// Update IP through update/status endpoint
|
||||
ip := l7.GetIP()
|
||||
currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currIng.Status = extensions.IngressStatus{
|
||||
LoadBalancer: apiv1.LoadBalancerStatus{
|
||||
Ingress: []apiv1.LoadBalancerIngress{
|
||||
{IP: ip},
|
||||
},
|
||||
},
|
||||
}
|
||||
if ip != "" {
|
||||
lbIPs := ing.Status.LoadBalancer.Ingress
|
||||
if len(lbIPs) == 0 || lbIPs[0].IP != ip {
|
||||
// TODO: If this update fails it's probably resource version related,
|
||||
// which means it's advantageous to retry right away vs requeuing.
|
||||
glog.Infof("Updating loadbalancer %v/%v with IP %v", ing.Namespace, ing.Name, ip)
|
||||
if _, err := ingClient.UpdateStatus(currIng); err != nil {
|
||||
return err
|
||||
}
|
||||
lbc.recorder.Eventf(currIng, apiv1.EventTypeNormal, "CREATE", "ip: %v", ip)
|
||||
}
|
||||
}
|
||||
annotations := loadbalancers.GetLBAnnotations(l7, currIng.Annotations, lbc.CloudClusterManager.backendPool)
|
||||
if err := lbc.updateAnnotations(ing.Name, ing.Namespace, annotations); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lbc *LoadBalancerController) updateAnnotations(name, namespace string, annotations map[string]string) error {
|
||||
// Update annotations through /update endpoint
|
||||
ingClient := lbc.client.Extensions().Ingresses(namespace)
|
||||
currIng, err := ingClient.Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !reflect.DeepEqual(currIng.Annotations, annotations) {
|
||||
glog.V(3).Infof("Updating annotations of %v/%v", namespace, name)
|
||||
currIng.Annotations = annotations
|
||||
if _, err := ingClient.Update(currIng); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// toRuntimeInfo returns L7RuntimeInfo for the given ingresses.
|
||||
func (lbc *LoadBalancerController) toRuntimeInfo(ingList extensions.IngressList) (lbs []*loadbalancers.L7RuntimeInfo, err error) {
|
||||
for _, ing := range ingList.Items {
|
||||
k, err := keyFunc(&ing)
|
||||
if err != nil {
|
||||
glog.Warningf("Cannot get key for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var tls *loadbalancers.TLSCerts
|
||||
|
||||
annotations := ingAnnotations(ing.ObjectMeta.Annotations)
|
||||
// Load the TLS cert from the API Spec if it is not specified in the annotation.
|
||||
// TODO: enforce this with validation.
|
||||
if annotations.useNamedTLS() == "" {
|
||||
tls, err = lbc.tlsLoader.load(&ing)
|
||||
if err != nil {
|
||||
glog.Warningf("Cannot get certs for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbs = append(lbs, &loadbalancers.L7RuntimeInfo{
|
||||
Name: k,
|
||||
TLS: tls,
|
||||
TLSName: annotations.useNamedTLS(),
|
||||
AllowHTTP: annotations.allowHTTP(),
|
||||
StaticIPName: annotations.staticIPName(),
|
||||
})
|
||||
}
|
||||
return lbs, nil
|
||||
}
|
||||
|
||||
// syncNodes manages the syncing of kubernetes nodes to gce instance groups.
|
||||
// The instancegroups are referenced by loadbalancer backends.
|
||||
func (lbc *LoadBalancerController) syncNodes(key string) error {
|
||||
nodeNames, err := lbc.getReadyNodeNames()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := lbc.CloudClusterManager.instancePool.Sync(nodeNames); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getNodeReadyPredicate() listers.NodeConditionPredicate {
|
||||
return func(node *apiv1.Node) bool {
|
||||
for ix := range node.Status.Conditions {
|
||||
condition := &node.Status.Conditions[ix]
|
||||
if condition.Type == apiv1.NodeReady {
|
||||
return condition.Status == apiv1.ConditionTrue
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// getReadyNodeNames returns names of schedulable, ready nodes from the node lister.
|
||||
func (lbc *LoadBalancerController) getReadyNodeNames() ([]string, error) {
|
||||
nodeNames := []string{}
|
||||
nodes, err := listers.NewNodeLister(lbc.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
|
||||
if err != nil {
|
||||
return nodeNames, err
|
||||
}
|
||||
for _, n := range nodes {
|
||||
if n.Spec.Unschedulable {
|
||||
continue
|
||||
}
|
||||
nodeNames = append(nodeNames, n.Name)
|
||||
}
|
||||
return nodeNames, nil
|
||||
}
|
|
@@ -1,452 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/firewalls"
|
||||
"k8s.io/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
const testClusterName = "testcluster"
|
||||
|
||||
var (
|
||||
testPathMap = map[string]string{"/foo": defaultBackendName(testClusterName)}
|
||||
testIPManager = testIP{}
|
||||
)
|
||||
|
||||
// TODO: Use utils.Namer instead of this function.
|
||||
func defaultBackendName(clusterName string) string {
|
||||
return fmt.Sprintf("%v-%v", backendPrefix, clusterName)
|
||||
}
|
||||
|
||||
// newLoadBalancerController create a loadbalancer controller.
|
||||
func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalancerController {
|
||||
kubeClient := fake.NewSimpleClientset()
|
||||
ctx := NewControllerContext(kubeClient, api_v1.NamespaceAll, 1*time.Second)
|
||||
lb, err := NewLoadBalancerController(kubeClient, ctx, cm.ClusterManager)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
lb.hasSynced = func() bool { return true }
|
||||
return lb
|
||||
}
|
||||
|
||||
// toHTTPIngressPaths converts the given pathMap to a list of HTTPIngressPaths.
|
||||
func toHTTPIngressPaths(pathMap map[string]string) []extensions.HTTPIngressPath {
|
||||
httpPaths := []extensions.HTTPIngressPath{}
|
||||
for path, backend := range pathMap {
|
||||
httpPaths = append(httpPaths, extensions.HTTPIngressPath{
|
||||
Path: path,
|
||||
Backend: extensions.IngressBackend{
|
||||
ServiceName: backend,
|
||||
ServicePort: testBackendPort,
|
||||
},
|
||||
})
|
||||
}
|
||||
return httpPaths
|
||||
}
|
||||
|
||||
// toIngressRules converts the given ingressRule map to a list of IngressRules.
|
||||
func toIngressRules(hostRules map[string]utils.FakeIngressRuleValueMap) []extensions.IngressRule {
|
||||
rules := []extensions.IngressRule{}
|
||||
for host, pathMap := range hostRules {
|
||||
rules = append(rules, extensions.IngressRule{
|
||||
Host: host,
|
||||
IngressRuleValue: extensions.IngressRuleValue{
|
||||
HTTP: &extensions.HTTPIngressRuleValue{
|
||||
Paths: toHTTPIngressPaths(pathMap),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
return rules
|
||||
}
|
||||
|
||||
// newIngress returns a new Ingress with the given path map.
|
||||
func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.Ingress {
|
||||
return &extensions.Ingress{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%v", uuid.NewUUID()),
|
||||
Namespace: api.NamespaceNone,
|
||||
},
|
||||
Spec: extensions.IngressSpec{
|
||||
Backend: &extensions.IngressBackend{
|
||||
ServiceName: defaultBackendName(testClusterName),
|
||||
ServicePort: testBackendPort,
|
||||
},
|
||||
Rules: toIngressRules(hostRules),
|
||||
},
|
||||
Status: extensions.IngressStatus{
|
||||
LoadBalancer: api_v1.LoadBalancerStatus{
|
||||
Ingress: []api_v1.LoadBalancerIngress{
|
||||
{IP: testIPManager.ip()},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// validIngress returns a valid Ingress.
|
||||
func validIngress() *extensions.Ingress {
|
||||
return newIngress(map[string]utils.FakeIngressRuleValueMap{
|
||||
"foo.bar.com": testPathMap,
|
||||
})
|
||||
}
|
||||
|
||||
// getKey returns the key for an ingress.
|
||||
func getKey(ing *extensions.Ingress, t *testing.T) string {
|
||||
key, err := keyFunc(ing)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error getting key for Ingress %v: %v", ing.Name, err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// nodePortManager is a helper to allocate ports to services and
|
||||
// remember the allocations.
|
||||
type nodePortManager struct {
|
||||
portMap map[string]int
|
||||
start int
|
||||
end int
|
||||
namer utils.Namer
|
||||
}
|
||||
|
||||
// getNodePort returns a pseudo-random node port for the given service, remembering the allocation.
|
||||
func (p *nodePortManager) getNodePort(svcName string) int {
|
||||
if port, ok := p.portMap[svcName]; ok {
|
||||
return port
|
||||
}
|
||||
p.portMap[svcName] = rand.Intn(p.end-p.start) + p.start
|
||||
return p.portMap[svcName]
|
||||
}
|
||||
|
||||
// toNodePortSvcNames converts all service names in the given map to gce node
|
||||
// port names, eg foo -> k8-be-<foo nodeport>
|
||||
func (p *nodePortManager) toNodePortSvcNames(inputMap map[string]utils.FakeIngressRuleValueMap) map[string]utils.FakeIngressRuleValueMap {
|
||||
expectedMap := map[string]utils.FakeIngressRuleValueMap{}
|
||||
for host, rules := range inputMap {
|
||||
ruleMap := utils.FakeIngressRuleValueMap{}
|
||||
for path, svc := range rules {
|
||||
ruleMap[path] = p.namer.BeName(int64(p.portMap[svc]))
|
||||
}
|
||||
expectedMap[host] = ruleMap
|
||||
}
|
||||
return expectedMap
|
||||
}
|
||||
|
||||
func newPortManager(st, end int) *nodePortManager {
|
||||
return &nodePortManager{map[string]int{}, st, end, utils.Namer{}}
|
||||
}
|
||||
|
||||
// addIngress adds an ingress to the loadbalancer controllers ingress store. If
|
||||
// a nodePortManager is supplied, it also adds all backends to the service store
|
||||
// with a nodePort acquired through it.
|
||||
func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePortManager) {
|
||||
lbc.ingLister.Store.Add(ing)
|
||||
if pm == nil {
|
||||
return
|
||||
}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
for _, path := range rule.HTTP.Paths {
|
||||
svc := &api_v1.Service{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: path.Backend.ServiceName,
|
||||
Namespace: ing.Namespace,
|
||||
},
|
||||
}
|
||||
var svcPort api_v1.ServicePort
|
||||
switch path.Backend.ServicePort.Type {
|
||||
case intstr.Int:
|
||||
svcPort = api_v1.ServicePort{Port: path.Backend.ServicePort.IntVal}
|
||||
default:
|
||||
svcPort = api_v1.ServicePort{Name: path.Backend.ServicePort.StrVal}
|
||||
}
|
||||
svcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName))
|
||||
svc.Spec.Ports = []api_v1.ServicePort{svcPort}
|
||||
lbc.svcLister.Indexer.Add(svc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLbCreateDelete(t *testing.T) {
|
||||
testFirewallName := "quux"
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, testFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
inputMap1 := map[string]utils.FakeIngressRuleValueMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": "foo1svc",
|
||||
"/foo2": "foo2svc",
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": "bar1svc",
|
||||
"/bar2": "bar2svc",
|
||||
},
|
||||
}
|
||||
inputMap2 := map[string]utils.FakeIngressRuleValueMap{
|
||||
"baz.foobar.com": {
|
||||
"/foo": "foo1svc",
|
||||
"/bar": "bar1svc",
|
||||
},
|
||||
}
|
||||
pm := newPortManager(1, 65536)
|
||||
ings := []*extensions.Ingress{}
|
||||
for _, m := range []map[string]utils.FakeIngressRuleValueMap{inputMap1, inputMap2} {
|
||||
newIng := newIngress(m)
|
||||
addIngress(lbc, newIng, pm)
|
||||
ingStoreKey := getKey(newIng, t)
|
||||
lbc.sync(ingStoreKey)
|
||||
l7, err := cm.l7Pool.Get(ingStoreKey)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(m))
|
||||
ings = append(ings, newIng)
|
||||
}
|
||||
lbc.ingLister.Store.Delete(ings[0])
|
||||
lbc.sync(getKey(ings[0], t))
|
||||
|
||||
// BackendServices associated with ports of deleted Ingress' should get gc'd
|
||||
// when the Ingress is deleted, regardless of the service. At the same time
|
||||
// we shouldn't pull shared backends out from existing loadbalancers.
|
||||
unexpected := []int{pm.portMap["foo2svc"], pm.portMap["bar2svc"]}
|
||||
expected := []int{pm.portMap["foo1svc"], pm.portMap["bar1svc"]}
|
||||
firewallPorts := sets.NewString()
|
||||
pm.namer.SetFirewallName(testFirewallName)
|
||||
firewallName := pm.namer.FrName(pm.namer.FrSuffix())
|
||||
|
||||
if firewallRule, err := cm.firewallPool.(*firewalls.FirewallRules).GetFirewall(firewallName); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
} else {
|
||||
if len(firewallRule.Allowed) != 1 {
|
||||
t.Fatalf("Expected a single firewall rule")
|
||||
}
|
||||
for _, p := range firewallRule.Allowed[0].Ports {
|
||||
firewallPorts.Insert(p)
|
||||
}
|
||||
}
|
||||
|
||||
for _, port := range expected {
|
||||
if _, err := cm.backendPool.Get(int64(port)); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
if !firewallPorts.Has(fmt.Sprintf("%v", port)) {
|
||||
t.Fatalf("Expected a firewall rule for port %v", port)
|
||||
}
|
||||
}
|
||||
for _, port := range unexpected {
|
||||
if be, err := cm.backendPool.Get(int64(port)); err == nil {
|
||||
t.Fatalf("Found backend %+v for port %v", be, port)
|
||||
}
|
||||
}
|
||||
lbc.ingLister.Store.Delete(ings[1])
|
||||
lbc.sync(getKey(ings[1], t))
|
||||
|
||||
// No cluster resources (except the defaults used by the cluster manager)
|
||||
// should exist at this point.
|
||||
for _, port := range expected {
|
||||
if be, err := cm.backendPool.Get(int64(port)); err == nil {
|
||||
t.Fatalf("Found backend %+v for port %v", be, port)
|
||||
}
|
||||
}
|
||||
if len(cm.fakeLbs.Fw) != 0 || len(cm.fakeLbs.Um) != 0 || len(cm.fakeLbs.Tp) != 0 {
|
||||
t.Fatalf("Loadbalancer leaked resources")
|
||||
}
|
||||
for _, lbName := range []string{getKey(ings[0], t), getKey(ings[1], t)} {
|
||||
if l7, err := cm.l7Pool.Get(lbName); err == nil {
|
||||
t.Fatalf("Found unexpected loadbalandcer %+v: %v", l7, err)
|
||||
}
|
||||
}
|
||||
if firewallRule, err := cm.firewallPool.(*firewalls.FirewallRules).GetFirewall(firewallName); err == nil {
|
||||
t.Fatalf("Found unexpected firewall rule %v", firewallRule)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLbFaultyUpdate(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
inputMap := map[string]utils.FakeIngressRuleValueMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": "foo1svc",
|
||||
"/foo2": "foo2svc",
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": "bar1svc",
|
||||
"/bar2": "bar2svc",
|
||||
},
|
||||
}
|
||||
ing := newIngress(inputMap)
|
||||
pm := newPortManager(1, 65536)
|
||||
addIngress(lbc, ing, pm)
|
||||
|
||||
ingStoreKey := getKey(ing, t)
|
||||
lbc.sync(ingStoreKey)
|
||||
l7, err := cm.l7Pool.Get(ingStoreKey)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(inputMap))
|
||||
|
||||
// Change the urlmap directly through the lb pool, resync, and
|
||||
// make sure the controller corrects it.
|
||||
l7.UpdateUrlMap(utils.GCEURLMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": &compute.BackendService{SelfLink: "foo2svc"},
|
||||
},
|
||||
})
|
||||
|
||||
lbc.sync(ingStoreKey)
|
||||
cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(inputMap))
|
||||
}
|
||||
|
||||
func TestLbDefaulting(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
// Make sure the controller plugs in the default values accepted by GCE.
|
||||
ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
|
||||
pm := newPortManager(1, 65536)
|
||||
addIngress(lbc, ing, pm)
|
||||
|
||||
ingStoreKey := getKey(ing, t)
|
||||
lbc.sync(ingStoreKey)
|
||||
l7, err := cm.l7Pool.Get(ingStoreKey)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
expectedMap := map[string]utils.FakeIngressRuleValueMap{loadbalancers.DefaultHost: {loadbalancers.DefaultPath: "foo1svc"}}
|
||||
cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(expectedMap))
|
||||
}
|
||||
|
||||
func TestLbNoService(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
inputMap := map[string]utils.FakeIngressRuleValueMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": "foo1svc",
|
||||
},
|
||||
}
|
||||
ing := newIngress(inputMap)
|
||||
ing.Spec.Backend.ServiceName = "foo1svc"
|
||||
ingStoreKey := getKey(ing, t)
|
||||
|
||||
// Adds ingress to store, but doesn't create an associated service.
|
||||
// This will still create the associated loadbalancer, it will just
|
||||
// have empty rules. The rules will get corrected when the service
|
||||
// pops up.
|
||||
addIngress(lbc, ing, nil)
|
||||
lbc.sync(ingStoreKey)
|
||||
|
||||
l7, err := cm.l7Pool.Get(ingStoreKey)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
// Creates the service, next sync should have complete url map.
|
||||
pm := newPortManager(1, 65536)
|
||||
addIngress(lbc, ing, pm)
|
||||
lbc.enqueueIngressForService(&api_v1.Service{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: "foo1svc",
|
||||
Namespace: ing.Namespace,
|
||||
},
|
||||
})
|
||||
// TODO: This will hang if the previous step failed to insert into queue
|
||||
key, _ := lbc.ingQueue.queue.Get()
|
||||
lbc.sync(key.(string))
|
||||
|
||||
inputMap[utils.DefaultBackendKey] = map[string]string{
|
||||
utils.DefaultBackendKey: "foo1svc",
|
||||
}
|
||||
expectedMap := pm.toNodePortSvcNames(inputMap)
|
||||
cm.fakeLbs.CheckURLMap(t, l7, expectedMap)
|
||||
}
|
||||
|
||||
func TestLbChangeStaticIP(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
inputMap := map[string]utils.FakeIngressRuleValueMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": "foo1svc",
|
||||
},
|
||||
}
|
||||
ing := newIngress(inputMap)
|
||||
ing.Spec.Backend.ServiceName = "foo1svc"
|
||||
cert := extensions.IngressTLS{SecretName: "foo"}
|
||||
ing.Spec.TLS = []extensions.IngressTLS{cert}
|
||||
|
||||
// Add some certs so we get 2 forwarding rules, the changed static IP
|
||||
// should be assigned to both the HTTP and HTTPS forwarding rules.
|
||||
lbc.tlsLoader = &fakeTLSSecretLoader{
|
||||
fakeCerts: map[string]*loadbalancers.TLSCerts{
|
||||
cert.SecretName: {Key: "foo", Cert: "bar"},
|
||||
},
|
||||
}
|
||||
|
||||
pm := newPortManager(1, 65536)
|
||||
addIngress(lbc, ing, pm)
|
||||
ingStoreKey := getKey(ing, t)
|
||||
|
||||
// First sync creates forwarding rules and allocates an IP.
|
||||
lbc.sync(ingStoreKey)
|
||||
|
||||
// First allocate a static ip, then specify a userip in annotations.
|
||||
// The forwarding rules should contain the user ip.
|
||||
// The static ip should get cleaned up on lb tear down.
|
||||
oldIP := ing.Status.LoadBalancer.Ingress[0].IP
|
||||
oldRules := cm.fakeLbs.GetForwardingRulesWithIPs([]string{oldIP})
|
||||
if len(oldRules) != 2 || oldRules[0].IPAddress != oldRules[1].IPAddress {
|
||||
t.Fatalf("Expected 2 forwarding rules with the same IP.")
|
||||
}
|
||||
|
||||
ing.Annotations = map[string]string{staticIPNameKey: "testip"}
|
||||
cm.fakeLbs.ReserveGlobalAddress(&compute.Address{Name: "testip", Address: "1.2.3.4"})
|
||||
|
||||
// Second sync reassigns 1.2.3.4 to existing forwarding rule (by recreating it)
|
||||
lbc.sync(ingStoreKey)
|
||||
|
||||
newRules := cm.fakeLbs.GetForwardingRulesWithIPs([]string{"1.2.3.4"})
|
||||
if len(newRules) != 2 || newRules[0].IPAddress != newRules[1].IPAddress || newRules[1].IPAddress != "1.2.3.4" {
|
||||
t.Fatalf("Found unexpected forwaring rules after changing static IP annotation.")
|
||||
}
|
||||
}
|
||||
|
||||
type testIP struct {
|
||||
start int
|
||||
}
|
||||
|
||||
func (t *testIP) ip() string {
|
||||
t.start++
|
||||
return fmt.Sprintf("0.0.0.%v", t.start)
|
||||
}
|
||||
|
||||
// TODO: Test lb status update when annotation stabilize
|
|
@@ -1,52 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This is the structure of the gce l7 controller:
|
||||
// apiserver <-> controller ---> pools --> cloud
|
||||
// | |
|
||||
// |-> Ingress |-> backends
|
||||
// |-> Services | |-> health checks
|
||||
// |-> Nodes |
|
||||
// |-> instance groups
|
||||
// | |-> port per backend
|
||||
// |
|
||||
// |-> loadbalancers
|
||||
// |-> http proxy
|
||||
// |-> forwarding rule
|
||||
// |-> urlmap
|
||||
// * apiserver: the kubernetes api server.
|
||||
// * controller: gce l7 controller, watches apiserver and interacts
|
||||
// with sync pools. The controller doesn't know anything about the cloud.
|
||||
// Communication between the controller and pools is 1 way.
|
||||
// * pool: the controller tells each pool about desired state by inserting
|
||||
// into shared memory store. The pools sync this with the cloud. Pools are
|
||||
// also responsible for periodically checking the edge links between various
|
||||
// cloud resources.
|
||||
//
|
||||
// A note on sync pools: this package has 3 sync pools: for node, instances and
|
||||
// loadbalancer resources. A sync pool is meant to record all creates/deletes
|
||||
// performed by a controller and periodically verify that links are not broken.
|
||||
// For example, the controller might create a backend via backendPool.Add(),
|
||||
// the backend pool remembers this and continuously verifies that the backend
|
||||
// is connected to the right instance group, and that the instance group has
|
||||
// the right ports open.
|
||||
//
|
||||
// A note on naming convention: per golang style guide for Initialisms, Http
|
||||
// should be HTTP and Url should be URL, however because these interfaces
|
||||
// must match their siblings in the Kubernetes cloud provider, which are in turn
|
||||
// consistent with GCE compute API, there might be inconsistencies.
|
||||
|
||||
package controller
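
// Illustrative sketch (added in this review, not in the original file): the
// "sync pool" idea described above, reduced to its essentials. The names here
// are hypothetical; the real pools (backends, instances, loadbalancers) live in
// their own packages and also talk to the cloud.
type exampleSyncPool struct {
	// desired records every name the controller asked the pool to create.
	desired map[string]bool
}

// Add records intent; a real pool would also create the cloud resource here.
func (p *exampleSyncPool) Add(name string) { p.desired[name] = true }

// GC forgets anything that is no longer desired. A real pool would also delete
// the cloud resource and re-verify edge links (e.g. backend -> instance group)
// on every pass.
func (p *exampleSyncPool) GC(stillWanted []string) {
	wanted := map[string]bool{}
	for _, n := range stillWanted {
		wanted[n] = true
	}
	for n := range p.desired {
		if !wanted[n] {
			delete(p.desired, n)
		}
	}
}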
|
|
@@ -1,77 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/backends"
|
||||
"k8s.io/ingress/controllers/gce/firewalls"
|
||||
"k8s.io/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/ingress/controllers/gce/instances"
|
||||
"k8s.io/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
testDefaultBeNodePort = backends.ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
|
||||
testBackendPort = intstr.IntOrString{Type: intstr.Int, IntVal: 80}
|
||||
)
|
||||
|
||||
// ClusterManager fake
|
||||
type fakeClusterManager struct {
|
||||
*ClusterManager
|
||||
fakeLbs *loadbalancers.FakeLoadBalancers
|
||||
fakeBackends *backends.FakeBackendServices
|
||||
fakeIGs *instances.FakeInstanceGroups
|
||||
}
|
||||
|
||||
// NewFakeClusterManager creates a new fake ClusterManager.
|
||||
func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager {
|
||||
fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
|
||||
fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
fakeHCP := healthchecks.NewFakeHealthCheckProvider()
|
||||
namer := utils.NewNamer(clusterName, firewallName)
|
||||
|
||||
nodePool := instances.NewNodePool(fakeIGs)
|
||||
nodePool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}})
|
||||
|
||||
healthChecker := healthchecks.NewHealthChecker(fakeHCP, "/", namer)
|
||||
|
||||
backendPool := backends.NewBackendPool(
|
||||
fakeBackends,
|
||||
healthChecker, nodePool, namer, []int64{}, false)
|
||||
l7Pool := loadbalancers.NewLoadBalancerPool(
|
||||
fakeLbs,
|
||||
// TODO: change this
|
||||
backendPool,
|
||||
testDefaultBeNodePort,
|
||||
namer,
|
||||
)
|
||||
frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(), namer)
|
||||
cm := &ClusterManager{
|
||||
ClusterNamer: namer,
|
||||
instancePool: nodePool,
|
||||
backendPool: backendPool,
|
||||
l7Pool: l7Pool,
|
||||
firewallPool: frPool,
|
||||
}
|
||||
return &fakeClusterManager{cm, fakeLbs, fakeBackends, fakeIGs}
|
||||
}
|
|
@@ -1,100 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/loadbalancers"
|
||||
)
|
||||
|
||||
// tlsLoader loads and validates the TLS certs of an Ingress.
|
||||
type tlsLoader interface {
|
||||
load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error)
|
||||
validate(certs *loadbalancers.TLSCerts) error
|
||||
}
|
||||
|
||||
// TODO: Add better cert validation.
|
||||
type noOPValidator struct{}
|
||||
|
||||
func (n *noOPValidator) validate(certs *loadbalancers.TLSCerts) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// apiServerTLSLoader loads TLS certs from the apiserver.
|
||||
type apiServerTLSLoader struct {
|
||||
noOPValidator
|
||||
client kubernetes.Interface
|
||||
}
|
||||
|
||||
func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) {
|
||||
if len(ing.Spec.TLS) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
// GCE L7s currently only support a single cert.
|
||||
if len(ing.Spec.TLS) > 1 {
|
||||
glog.Warningf("Ignoring %d certs and taking the first for ingress %v/%v",
|
||||
len(ing.Spec.TLS)-1, ing.Namespace, ing.Name)
|
||||
}
|
||||
secretName := ing.Spec.TLS[0].SecretName
|
||||
// TODO: Replace this for a secret watcher.
|
||||
glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName)
|
||||
secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName, meta_v1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, ok := secret.Data[api_v1.TLSCertKey]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("secret %v has no 'tls.crt'", secretName)
|
||||
}
|
||||
key, ok := secret.Data[api_v1.TLSPrivateKeyKey]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("secret %v has no 'tls.key'", secretName)
|
||||
}
|
||||
certs := &loadbalancers.TLSCerts{Key: string(key), Cert: string(cert)}
|
||||
if err := t.validate(certs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return certs, nil
|
||||
}
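
// For illustration only (added in this review, not part of the original file):
// the loader above expects a Secret of roughly this shape, keyed by tls.crt and
// tls.key. The name, namespace and PEM contents are placeholders.
func exampleTLSSecret() *api_v1.Secret {
	return &api_v1.Secret{
		ObjectMeta: meta_v1.ObjectMeta{Name: "my-tls-secret", Namespace: "default"},
		Data: map[string][]byte{
			api_v1.TLSCertKey:       []byte("-----BEGIN CERTIFICATE-----..."),
			api_v1.TLSPrivateKeyKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
		},
	}
}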
|
||||
|
||||
// TODO: Add support for file loading so we can support HTTPS default backends.
|
||||
|
||||
// fakeTLSSecretLoader fakes out TLS loading.
|
||||
type fakeTLSSecretLoader struct {
|
||||
noOPValidator
|
||||
fakeCerts map[string]*loadbalancers.TLSCerts
|
||||
}
|
||||
|
||||
func (f *fakeTLSSecretLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) {
|
||||
if len(ing.Spec.TLS) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
for name, cert := range f.fakeCerts {
|
||||
if ing.Spec.TLS[0].SecretName == name {
|
||||
return cert, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("couldn't find secret for ingress %v", ing.Name)
|
||||
}
|
|
@@ -1,706 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
listers "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/backends"
|
||||
"k8s.io/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
// allowHTTPKey tells the Ingress controller to allow/block HTTP access.
|
||||
// If either unset or set to true, the controller will create a
|
||||
// forwarding-rule for port 80, and any additional rules based on the TLS
|
||||
// section of the Ingress. If set to false, the controller will only create
|
||||
// rules for port 443 based on the TLS section.
|
||||
allowHTTPKey = "kubernetes.io/ingress.allow-http"
|
||||
|
||||
// staticIPNameKey tells the Ingress controller to use a specific GCE
|
||||
// static ip for its forwarding rules. If specified, the Ingress controller
|
||||
// assigns the static ip by this name to the forwarding rules of the given
|
||||
// Ingress. The controller *does not* manage this ip, it is the user's
|
||||
// responsibility to create/delete it.
|
||||
staticIPNameKey = "kubernetes.io/ingress.global-static-ip-name"
|
||||
|
||||
// preSharedCertKey represents the specific pre-shared SSL
|
||||
// certificate for the Ingress controller to use. The controller *does not*
|
||||
// manage this certificate, it is the user's responsibility to create/delete it.
|
||||
// In GCP, the Ingress controller assigns the SSL certificate with this name
|
||||
// to the target proxies of the Ingress.
|
||||
preSharedCertKey = "ingress.gcp.kubernetes.io/pre-shared-cert"
|
||||
|
||||
// serviceApplicationProtocolKey is a stringified JSON map of port names to
|
||||
// protocol strings. Possible values are HTTP, HTTPS
|
||||
// Example:
|
||||
// '{"my-https-port":"HTTPS","my-http-port":"HTTP"}'
|
||||
serviceApplicationProtocolKey = "service.alpha.kubernetes.io/app-protocols"
|
||||
|
||||
// ingressClassKey picks a specific "class" for the Ingress. The controller
|
||||
// only processes Ingresses with this annotation either unset, or set
|
||||
// to either gceIngressClass or the empty string.
|
||||
ingressClassKey = "kubernetes.io/ingress.class"
|
||||
gceIngressClass = "gce"
|
||||
gceMultiIngressClass = "gce-multi-cluster"
|
||||
|
||||
// Label key to denote which GCE zone a Kubernetes node is in.
|
||||
zoneKey = "failure-domain.beta.kubernetes.io/zone"
|
||||
defaultZone = ""
|
||||
|
||||
// instanceGroupsAnnotationKey is the annotation key used by controller to
|
||||
// specify the name and zone of instance groups created for the ingress.
|
||||
// This is read-only for users. The controller will overwrite any user updates.
|
||||
// This is only set for ingresses with ingressClass = "gce-multi-cluster"
|
||||
instanceGroupsAnnotationKey = "ingress.gcp.kubernetes.io/instance-groups"
|
||||
)
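
// Illustrative only (added in this review): an Ingress carrying the annotations
// defined above might look like this. The object name, static IP name and
// pre-shared certificate name are placeholder values.
func exampleAnnotatedIngress() *extensions.Ingress {
	return &extensions.Ingress{
		ObjectMeta: meta_v1.ObjectMeta{
			Name: "example-ing",
			Annotations: map[string]string{
				ingressClassKey:  gceIngressClass,
				allowHTTPKey:     "false",        // only create the :443 forwarding rule
				staticIPNameKey:  "my-static-ip", // pre-created by the user
				preSharedCertKey: "my-gce-cert",  // pre-created GCE SSL certificate
			},
		},
	}
}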
|
||||
|
||||
// ingAnnotations represents Ingress annotations.
|
||||
type ingAnnotations map[string]string
|
||||
|
||||
// allowHTTP returns the allowHTTP flag. True by default.
|
||||
func (ing ingAnnotations) allowHTTP() bool {
|
||||
val, ok := ing[allowHTTPKey]
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
v, err := strconv.ParseBool(val)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// useNamedTLS returns the name of the GCE SSL certificate. Empty by default.
|
||||
func (ing ingAnnotations) useNamedTLS() string {
|
||||
val, ok := ing[preSharedCertKey]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func (ing ingAnnotations) staticIPName() string {
|
||||
val, ok := ing[staticIPNameKey]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func (ing ingAnnotations) ingressClass() string {
|
||||
val, ok := ing[ingressClassKey]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// svcAnnotations represents Service annotations.
|
||||
type svcAnnotations map[string]string
|
||||
|
||||
func (svc svcAnnotations) ApplicationProtocols() (map[string]utils.AppProtocol, error) {
|
||||
val, ok := svc[serviceApplicationProtocolKey]
|
||||
if !ok {
|
||||
return map[string]utils.AppProtocol{}, nil
|
||||
}
|
||||
|
||||
var portToProtos map[string]utils.AppProtocol
|
||||
err := json.Unmarshal([]byte(val), &portToProtos)
|
||||
|
||||
// Verify protocol is an accepted value
|
||||
for _, proto := range portToProtos {
|
||||
switch proto {
|
||||
case utils.ProtocolHTTP, utils.ProtocolHTTPS:
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid port application protocol: %v", proto)
|
||||
}
|
||||
}
|
||||
|
||||
return portToProtos, err
|
||||
}
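
// Illustrative only (added in this review): parsing the app-protocols annotation
// for a Service that exposes one HTTPS and one HTTP port. The port names are
// placeholders.
func exampleApplicationProtocols() (map[string]utils.AppProtocol, error) {
	svc := svcAnnotations(map[string]string{
		serviceApplicationProtocolKey: `{"my-https-port":"HTTPS","my-http-port":"HTTP"}`,
	})
	// Returns {"my-https-port": "HTTPS", "my-http-port": "HTTP"} on success.
	return svc.ApplicationProtocols()
}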
|
||||
|
||||
// isGCEIngress returns true if the given Ingress either doesn't specify the
|
||||
// ingress.class annotation, or it's set to "gce".
|
||||
func isGCEIngress(ing *extensions.Ingress) bool {
|
||||
class := ingAnnotations(ing.ObjectMeta.Annotations).ingressClass()
|
||||
return class == "" || class == gceIngressClass
|
||||
}
|
||||
|
||||
// isGCEMultiClusterIngress returns true if the given Ingress has
|
||||
// ingress.class annotation set to "gce-multi-cluster".
|
||||
func isGCEMultiClusterIngress(ing *extensions.Ingress) bool {
|
||||
class := ingAnnotations(ing.ObjectMeta.Annotations).ingressClass()
|
||||
return class == gceMultiIngressClass
|
||||
}
|
||||
|
||||
// errorNodePortNotFound is an implementation of error.
|
||||
type errorNodePortNotFound struct {
|
||||
backend extensions.IngressBackend
|
||||
origErr error
|
||||
}
|
||||
|
||||
func (e errorNodePortNotFound) Error() string {
|
||||
return fmt.Sprintf("Could not find nodeport for backend %+v: %v",
|
||||
e.backend, e.origErr)
|
||||
}
|
||||
|
||||
type errorSvcAppProtosParsing struct {
|
||||
svc *api_v1.Service
|
||||
origErr error
|
||||
}
|
||||
|
||||
func (e errorSvcAppProtosParsing) Error() string {
|
||||
return fmt.Sprintf("could not parse %v annotation on Service %v/%v, err: %v", serviceApplicationProtocolKey, e.svc.Namespace, e.svc.Name, e.origErr)
|
||||
}
|
||||
|
||||
// taskQueue manages a work queue through an independent worker that
|
||||
// invokes the given sync function for every work item inserted.
|
||||
type taskQueue struct {
|
||||
// queue is the work queue the worker polls
|
||||
queue workqueue.RateLimitingInterface
|
||||
// sync is called for each item in the queue
|
||||
sync func(string) error
|
||||
// workerDone is closed when the worker exits
|
||||
workerDone chan struct{}
|
||||
}
|
||||
|
||||
func (t *taskQueue) run(period time.Duration, stopCh <-chan struct{}) {
|
||||
wait.Until(t.worker, period, stopCh)
|
||||
}
|
||||
|
||||
// enqueue enqueues ns/name of the given api object in the task queue.
|
||||
func (t *taskQueue) enqueue(obj interface{}) {
|
||||
key, err := keyFunc(obj)
|
||||
if err != nil {
|
||||
glog.Infof("Couldn't get key for object %+v: %v", obj, err)
|
||||
return
|
||||
}
|
||||
t.queue.Add(key)
|
||||
}
|
||||
|
||||
// worker processes work in the queue through sync.
|
||||
func (t *taskQueue) worker() {
|
||||
for {
|
||||
key, quit := t.queue.Get()
|
||||
if quit {
|
||||
close(t.workerDone)
|
||||
return
|
||||
}
|
||||
glog.V(3).Infof("Syncing %v", key)
|
||||
if err := t.sync(key.(string)); err != nil {
|
||||
glog.Errorf("Requeuing %v, err %v", key, err)
|
||||
t.queue.AddRateLimited(key)
|
||||
} else {
|
||||
t.queue.Forget(key)
|
||||
}
|
||||
t.queue.Done(key)
|
||||
}
|
||||
}
|
||||
|
||||
// shutdown shuts down the work queue and waits for the worker to ACK
|
||||
func (t *taskQueue) shutdown() {
|
||||
t.queue.ShutDown()
|
||||
<-t.workerDone
|
||||
}
|
||||
|
||||
// NewTaskQueue creates a new task queue with the given sync function.
|
||||
// The sync function is called for every element inserted into the queue.
|
||||
func NewTaskQueue(syncFn func(string) error) *taskQueue {
|
||||
return &taskQueue{
|
||||
queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
|
||||
sync: syncFn,
|
||||
workerDone: make(chan struct{}),
|
||||
}
|
||||
}
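
// Illustrative only (added in this review): minimal use of the task queue. The
// sync function and the one-second worker period are assumptions; the real
// controller wires its Ingress and node queues in controller.go in the same way.
func exampleTaskQueue(stopCh chan struct{}) *taskQueue {
	q := NewTaskQueue(func(key string) error {
		glog.Infof("syncing %v", key)
		return nil // a non-nil error would requeue the key with rate limiting
	})
	go q.run(time.Second, stopCh) // start the worker loop
	// Informer event handlers then call q.enqueue(obj); call q.shutdown() on exit.
	return q
}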
|
||||
|
||||
// compareLinks returns true if the 2 self links are equal.
|
||||
func compareLinks(l1, l2 string) bool {
|
||||
// TODO: These can be partial links
|
||||
return l1 == l2 && l1 != ""
|
||||
}
|
||||
|
||||
// StoreToIngressLister makes a Store that lists Ingress.
|
||||
// TODO: Move this to cache/listers post 1.1.
|
||||
type StoreToIngressLister struct {
|
||||
cache.Store
|
||||
}
|
||||
|
||||
// StoreToNodeLister makes a Store that lists Node.
|
||||
type StoreToNodeLister struct {
|
||||
cache.Indexer
|
||||
}
|
||||
|
||||
// StoreToServiceLister makes a Store that lists Service.
|
||||
type StoreToServiceLister struct {
|
||||
cache.Indexer
|
||||
}
|
||||
|
||||
// StoreToPodLister makes a Store that lists Pods.
|
||||
type StoreToPodLister struct {
|
||||
cache.Indexer
|
||||
}
|
||||
|
||||
// List returns a list of all pods based on selector
|
||||
func (s *StoreToPodLister) List(selector labels.Selector) (ret []*api_v1.Pod, err error) {
|
||||
err = ListAll(s.Indexer, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*api_v1.Pod))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// ListAll iterates a store and passes selected item to a func
|
||||
func ListAll(store cache.Store, selector labels.Selector, appendFn cache.AppendFunc) error {
|
||||
for _, m := range store.List() {
|
||||
metadata, err := meta.Accessor(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if selector.Matches(labels.Set(metadata.GetLabels())) {
|
||||
appendFn(m)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListAll lists all Ingress' in the store (both single and multi cluster ingresses).
|
||||
func (s *StoreToIngressLister) ListAll() (ing extensions.IngressList, err error) {
|
||||
for _, m := range s.Store.List() {
|
||||
newIng := m.(*extensions.Ingress)
|
||||
if isGCEIngress(newIng) || isGCEMultiClusterIngress(newIng) {
|
||||
ing.Items = append(ing.Items, *newIng)
|
||||
}
|
||||
}
|
||||
return ing, nil
|
||||
}
|
||||
|
||||
// ListGCEIngresses lists all GCE Ingress' in the store.
|
||||
func (s *StoreToIngressLister) ListGCEIngresses() (ing extensions.IngressList, err error) {
|
||||
for _, m := range s.Store.List() {
|
||||
newIng := m.(*extensions.Ingress)
|
||||
if isGCEIngress(newIng) {
|
||||
ing.Items = append(ing.Items, *newIng)
|
||||
}
|
||||
}
|
||||
return ing, nil
|
||||
}
|
||||
|
||||
// GetServiceIngress gets all the Ingress' that have rules pointing to a service.
|
||||
// Note that this ignores services without the right nodePorts.
|
||||
func (s *StoreToIngressLister) GetServiceIngress(svc *api_v1.Service) (ings []extensions.Ingress, err error) {
|
||||
IngressLoop:
|
||||
for _, m := range s.Store.List() {
|
||||
ing := *m.(*extensions.Ingress)
|
||||
if ing.Namespace != svc.Namespace {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check service of default backend
|
||||
if ing.Spec.Backend != nil && ing.Spec.Backend.ServiceName == svc.Name {
|
||||
ings = append(ings, ing)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check the target service for each path rule
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.IngressRuleValue.HTTP == nil {
|
||||
continue
|
||||
}
|
||||
for _, p := range rule.IngressRuleValue.HTTP.Paths {
|
||||
if p.Backend.ServiceName == svc.Name {
|
||||
ings = append(ings, ing)
|
||||
// Skip the rest of the rules to avoid duplicate ingresses in list
|
||||
continue IngressLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(ings) == 0 {
|
||||
err = fmt.Errorf("no ingress for service %v", svc.Name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GCETranslator helps with kubernetes -> gce api conversion.
|
||||
type GCETranslator struct {
|
||||
*LoadBalancerController
|
||||
}
|
||||
|
||||
// toURLMap converts an ingress to a map of subdomain: url-regex: gce backend.
|
||||
func (t *GCETranslator) toURLMap(ing *extensions.Ingress) (utils.GCEURLMap, error) {
|
||||
hostPathBackend := utils.GCEURLMap{}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.HTTP == nil {
|
||||
glog.Errorf("Ignoring non http Ingress rule")
|
||||
continue
|
||||
}
|
||||
pathToBackend := map[string]*compute.BackendService{}
|
||||
for _, p := range rule.HTTP.Paths {
|
||||
backend, err := t.toGCEBackend(&p.Backend, ing.Namespace)
|
||||
if err != nil {
|
||||
// If a service doesn't have a nodeport we can still forward traffic
|
||||
// to all other services under the assumption that the user will
|
||||
// modify nodeport.
|
||||
if _, ok := err.(errorNodePortNotFound); ok {
|
||||
t.recorder.Eventf(ing, api_v1.EventTypeWarning, "Service", err.(errorNodePortNotFound).Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// If a service doesn't have a backend, there's nothing the user
|
||||
// can do to correct this (the admin might've limited quota).
|
||||
// So keep requeuing the l7 till all backends exist.
|
||||
return utils.GCEURLMap{}, err
|
||||
}
|
||||
// The Ingress spec defines empty path as catch-all, so if a user
|
||||
// asks for a single host and multiple empty paths, all traffic is
|
||||
// sent to the last backend in the rules list.
|
||||
path := p.Path
|
||||
if path == "" {
|
||||
path = loadbalancers.DefaultPath
|
||||
}
|
||||
pathToBackend[path] = backend
|
||||
}
|
||||
// If multiple hostless rule sets are specified, last one wins
|
||||
host := rule.Host
|
||||
if host == "" {
|
||||
host = loadbalancers.DefaultHost
|
||||
}
|
||||
hostPathBackend[host] = pathToBackend
|
||||
}
|
||||
var defaultBackend *compute.BackendService
|
||||
if ing.Spec.Backend != nil {
|
||||
var err error
|
||||
defaultBackend, err = t.toGCEBackend(ing.Spec.Backend, ing.Namespace)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("%v", err)
|
||||
if _, ok := err.(errorNodePortNotFound); ok {
|
||||
msg = fmt.Sprintf("couldn't find nodeport for %v/%v", ing.Namespace, ing.Spec.Backend.ServiceName)
|
||||
}
|
||||
t.recorder.Eventf(ing, api_v1.EventTypeWarning, "Service", fmt.Sprintf("failed to identify user specified default backend, %v, using system default", msg))
|
||||
} else if defaultBackend != nil {
|
||||
t.recorder.Eventf(ing, api_v1.EventTypeNormal, "Service", fmt.Sprintf("default backend set to %v:%v", ing.Spec.Backend.ServiceName, defaultBackend.Port))
|
||||
}
|
||||
} else {
|
||||
t.recorder.Eventf(ing, api_v1.EventTypeNormal, "Service", "no user specified default backend, using system default")
|
||||
}
|
||||
hostPathBackend.PutDefaultBackend(defaultBackend)
|
||||
return hostPathBackend, nil
|
||||
}
|
||||
|
||||
func (t *GCETranslator) toGCEBackend(be *extensions.IngressBackend, ns string) (*compute.BackendService, error) {
|
||||
if be == nil {
|
||||
return nil, nil
|
||||
}
|
||||
port, err := t.getServiceNodePort(*be, ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
backend, err := t.CloudClusterManager.backendPool.Get(port.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("no GCE backend exists for port %v, kube backend %+v", port, be)
|
||||
}
|
||||
return backend, nil
|
||||
}
|
||||
|
||||
// getServiceNodePort looks in the svc store for a matching service:port,
|
||||
// and returns the nodeport.
|
||||
func (t *GCETranslator) getServiceNodePort(be extensions.IngressBackend, namespace string) (backends.ServicePort, error) {
|
||||
obj, exists, err := t.svcLister.Indexer.Get(
|
||||
&api_v1.Service{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: be.ServiceName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
})
|
||||
if !exists {
|
||||
return backends.ServicePort{}, errorNodePortNotFound{be, fmt.Errorf("service %v/%v not found in store", namespace, be.ServiceName)}
|
||||
}
|
||||
if err != nil {
|
||||
return backends.ServicePort{}, errorNodePortNotFound{be, err}
|
||||
}
|
||||
svc := obj.(*api_v1.Service)
|
||||
appProtocols, err := svcAnnotations(svc.GetAnnotations()).ApplicationProtocols()
|
||||
if err != nil {
|
||||
return backends.ServicePort{}, errorSvcAppProtosParsing{svc, err}
|
||||
}
|
||||
|
||||
var port *api_v1.ServicePort
|
||||
PortLoop:
|
||||
for _, p := range svc.Spec.Ports {
|
||||
np := p
|
||||
switch be.ServicePort.Type {
|
||||
case intstr.Int:
|
||||
if p.Port == be.ServicePort.IntVal {
|
||||
port = &np
|
||||
break PortLoop
|
||||
}
|
||||
default:
|
||||
if p.Name == be.ServicePort.StrVal {
|
||||
port = &np
|
||||
break PortLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if port == nil {
|
||||
return backends.ServicePort{}, errorNodePortNotFound{be, fmt.Errorf("could not find matching nodeport from service")}
|
||||
}
|
||||
|
||||
proto := utils.ProtocolHTTP
|
||||
if protoStr, exists := appProtocols[port.Name]; exists {
|
||||
proto = utils.AppProtocol(protoStr)
|
||||
}
|
||||
|
||||
p := backends.ServicePort{
|
||||
Port: int64(port.NodePort),
|
||||
Protocol: proto,
|
||||
SvcName: types.NamespacedName{Namespace: namespace, Name: be.ServiceName},
|
||||
SvcPort: be.ServicePort,
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// toNodePorts is a helper method over ingressToNodePorts to process a list of ingresses.
|
||||
func (t *GCETranslator) toNodePorts(ings *extensions.IngressList) []backends.ServicePort {
|
||||
var knownPorts []backends.ServicePort
|
||||
for _, ing := range ings.Items {
|
||||
knownPorts = append(knownPorts, t.ingressToNodePorts(&ing)...)
|
||||
}
|
||||
return knownPorts
|
||||
}
|
||||
|
||||
// ingressToNodePorts converts a pathlist to a flat list of nodeports for the given ingress.
|
||||
func (t *GCETranslator) ingressToNodePorts(ing *extensions.Ingress) []backends.ServicePort {
|
||||
var knownPorts []backends.ServicePort
|
||||
defaultBackend := ing.Spec.Backend
|
||||
if defaultBackend != nil {
|
||||
port, err := t.getServiceNodePort(*defaultBackend, ing.Namespace)
|
||||
if err != nil {
|
||||
glog.Infof("%v", err)
|
||||
} else {
|
||||
knownPorts = append(knownPorts, port)
|
||||
}
|
||||
}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.HTTP == nil {
|
||||
glog.Errorf("ignoring non http Ingress rule")
|
||||
continue
|
||||
}
|
||||
for _, path := range rule.HTTP.Paths {
|
||||
port, err := t.getServiceNodePort(path.Backend, ing.Namespace)
|
||||
if err != nil {
|
||||
glog.Infof("%v", err)
|
||||
continue
|
||||
}
|
||||
knownPorts = append(knownPorts, port)
|
||||
}
|
||||
}
|
||||
return knownPorts
|
||||
}
|
||||
|
||||
func getZone(n *api_v1.Node) string {
|
||||
zone, ok := n.Labels[zoneKey]
|
||||
if !ok {
|
||||
return defaultZone
|
||||
}
|
||||
return zone
|
||||
}
|
||||
|
||||
// GetZoneForNode returns the zone for a given node by looking up its zone label.
|
||||
func (t *GCETranslator) GetZoneForNode(name string) (string, error) {
|
||||
nodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, n := range nodes {
|
||||
if n.Name == name {
|
||||
// TODO: Make this more resilient to label changes by listing
|
||||
// cloud nodes and figuring out zone.
|
||||
return getZone(n), nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("node not found %v", name)
|
||||
}
|
||||
|
||||
// ListZones returns a list of zones this Kubernetes cluster spans.
|
||||
func (t *GCETranslator) ListZones() ([]string, error) {
|
||||
zones := sets.String{}
|
||||
readyNodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
|
||||
if err != nil {
|
||||
return zones.List(), err
|
||||
}
|
||||
for _, n := range readyNodes {
|
||||
zones.Insert(getZone(n))
|
||||
}
|
||||
return zones.List(), nil
|
||||
}
|
||||
|
||||
// getHTTPProbe returns the http readiness probe from the first container
|
||||
// that matches targetPort, from the set of pods matching the given labels.
|
||||
func (t *GCETranslator) getHTTPProbe(svc api_v1.Service, targetPort intstr.IntOrString, protocol utils.AppProtocol) (*api_v1.Probe, error) {
|
||||
l := svc.Spec.Selector
|
||||
|
||||
// Lookup any container with a matching targetPort from the set of pods
|
||||
// with a matching label selector.
|
||||
pl, err := t.podLister.List(labels.SelectorFromSet(labels.Set(l)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If multiple endpoints have different health checks, take the first
|
||||
sort.Sort(PodsByCreationTimestamp(pl))
|
||||
|
||||
for _, pod := range pl {
|
||||
if pod.Namespace != svc.Namespace {
|
||||
continue
|
||||
}
|
||||
logStr := fmt.Sprintf("Pod %v matching service selectors %v (targetport %+v)", pod.Name, l, targetPort)
|
||||
for _, c := range pod.Spec.Containers {
|
||||
if !isSimpleHTTPProbe(c.ReadinessProbe) || string(protocol) != string(c.ReadinessProbe.HTTPGet.Scheme) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, p := range c.Ports {
|
||||
if (targetPort.Type == intstr.Int && targetPort.IntVal == p.ContainerPort) ||
|
||||
(targetPort.Type == intstr.String && targetPort.StrVal == p.Name) {
|
||||
|
||||
readinessProbePort := c.ReadinessProbe.Handler.HTTPGet.Port
|
||||
switch readinessProbePort.Type {
|
||||
case intstr.Int:
|
||||
if readinessProbePort.IntVal == p.ContainerPort {
|
||||
return c.ReadinessProbe, nil
|
||||
}
|
||||
case intstr.String:
|
||||
if readinessProbePort.StrVal == p.Name {
|
||||
return c.ReadinessProbe, nil
|
||||
}
|
||||
}
|
||||
|
||||
glog.Infof("%v: found matching targetPort on container %v, but not on readinessProbe (%+v)",
|
||||
logStr, c.Name, c.ReadinessProbe.Handler.HTTPGet.Port)
|
||||
}
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("%v: lacks a matching HTTP probe for use in health checks.", logStr)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// isSimpleHTTPProbe returns true if the given Probe is:
|
||||
// - an HTTPGet probe, as opposed to a tcp or exec probe
|
||||
// - has no special host or headers fields, except for possibly an HTTP Host header
|
||||
func isSimpleHTTPProbe(probe *api_v1.Probe) bool {
|
||||
return (probe != nil && probe.Handler.HTTPGet != nil && probe.Handler.HTTPGet.Host == "" &&
|
||||
(len(probe.Handler.HTTPGet.HTTPHeaders) == 0 ||
|
||||
(len(probe.Handler.HTTPGet.HTTPHeaders) == 1 && probe.Handler.HTTPGet.HTTPHeaders[0].Name == "Host")))
|
||||
}
|
||||
|
||||
// GetProbe returns a probe that's used for the given nodeport
|
||||
func (t *GCETranslator) GetProbe(port backends.ServicePort) (*api_v1.Probe, error) {
|
||||
sl := t.svcLister.List()
|
||||
|
||||
// Find the label and target port of the one service with the given nodePort
|
||||
var service api_v1.Service
|
||||
var svcPort api_v1.ServicePort
|
||||
var found bool
|
||||
OuterLoop:
|
||||
for _, as := range sl {
|
||||
service = *as.(*api_v1.Service)
|
||||
for _, sp := range service.Spec.Ports {
|
||||
svcPort = sp
|
||||
// only one Service can match this nodePort, try and look up
|
||||
// the readiness probe of the pods behind it
|
||||
if int32(port.Port) == sp.NodePort {
|
||||
found = true
|
||||
break OuterLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return nil, fmt.Errorf("unable to find nodeport %v in any service", port)
|
||||
}
|
||||
|
||||
return t.getHTTPProbe(service, svcPort.TargetPort, port.Protocol)
|
||||
}
|
||||
|
||||
// PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker.
|
||||
type PodsByCreationTimestamp []*api_v1.Pod
|
||||
|
||||
func (o PodsByCreationTimestamp) Len() int { return len(o) }
|
||||
func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
||||
func (o PodsByCreationTimestamp) Less(i, j int) bool {
|
||||
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
|
||||
return o[i].Name < o[j].Name
|
||||
}
|
||||
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
|
||||
}
|
||||
|
||||
// setInstanceGroupsAnnotation sets the instance-groups annotation with names of the given instance groups.
|
||||
func setInstanceGroupsAnnotation(existing map[string]string, igs []*compute.InstanceGroup) error {
|
||||
type Value struct {
|
||||
Name string
|
||||
Zone string
|
||||
}
|
||||
var instanceGroups []Value
|
||||
for _, ig := range igs {
|
||||
instanceGroups = append(instanceGroups, Value{Name: ig.Name, Zone: ig.Zone})
|
||||
}
|
||||
jsonValue, err := json.Marshal(instanceGroups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
existing[instanceGroupsAnnotationKey] = string(jsonValue)
|
||||
return nil
|
||||
}
|
||||
|
||||
// uniq returns an array of unique service ports from the given array.
|
||||
func uniq(nodePorts []backends.ServicePort) []backends.ServicePort {
|
||||
portMap := map[int64]backends.ServicePort{}
|
||||
for _, p := range nodePorts {
|
||||
portMap[p.Port] = p
|
||||
}
|
||||
nodePorts = make([]backends.ServicePort, 0, len(portMap))
|
||||
for _, sp := range portMap {
|
||||
nodePorts = append(nodePorts, sp)
|
||||
}
|
||||
return nodePorts
|
||||
}
|
|
@ -1,302 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/ingress/controllers/gce/backends"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// Pods created in loops start from this time, for routines that
|
||||
// sort on timestamp.
|
||||
var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
|
||||
|
||||
func TestZoneListing(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
zoneToNode := map[string][]string{
|
||||
"zone-1": {"n1"},
|
||||
"zone-2": {"n2"},
|
||||
}
|
||||
addNodes(lbc, zoneToNode)
|
||||
zones, err := lbc.tr.ListZones()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to list zones: %v", err)
|
||||
}
|
||||
for expectedZone := range zoneToNode {
|
||||
found := false
|
||||
for _, gotZone := range zones {
|
||||
if gotZone == expectedZone {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("Expected zones %v; Got zones %v", zoneToNode, zones)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInstancesAddedToZones(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
zoneToNode := map[string][]string{
|
||||
"zone-1": {"n1", "n2"},
|
||||
"zone-2": {"n3"},
|
||||
}
|
||||
addNodes(lbc, zoneToNode)
|
||||
|
||||
// Create 2 igs, one per zone.
|
||||
testIG := "test-ig"
|
||||
lbc.CloudClusterManager.instancePool.AddInstanceGroup(testIG, []int64{int64(3001)})
|
||||
|
||||
// node pool syncs kube-nodes, this will add them to both igs.
|
||||
lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
|
||||
gotZonesToNode := cm.fakeIGs.GetInstancesByZone()
|
||||
|
||||
for z, nodeNames := range zoneToNode {
|
||||
if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil {
|
||||
t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
|
||||
}
|
||||
expNodes := sets.NewString(nodeNames...)
|
||||
gotNodes := sets.NewString(gotZonesToNode[z]...)
|
||||
if !gotNodes.Equal(expNodes) {
|
||||
t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProbeGetter(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
|
||||
nodePortToHealthCheck := map[backends.ServicePort]string{
|
||||
{Port: 3001, Protocol: utils.ProtocolHTTP}: "/healthz",
|
||||
{Port: 3002, Protocol: utils.ProtocolHTTPS}: "/foo",
|
||||
}
|
||||
addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
|
||||
for p, exp := range nodePortToHealthCheck {
|
||||
got, err := lbc.tr.GetProbe(p)
|
||||
if err != nil || got == nil {
|
||||
t.Errorf("Failed to get probe for node port %v: %v", p, err)
|
||||
} else if getProbePath(got) != exp {
|
||||
t.Errorf("Wrong path for node port %v, got %v expected %v", p, getProbePath(got), exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProbeGetterNamedPort(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
nodePortToHealthCheck := map[backends.ServicePort]string{
|
||||
{Port: 3001, Protocol: utils.ProtocolHTTP}: "/healthz",
|
||||
}
|
||||
addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
|
||||
for _, p := range lbc.podLister.Indexer.List() {
|
||||
pod := p.(*api_v1.Pod)
|
||||
pod.Spec.Containers[0].Ports[0].Name = "test"
|
||||
pod.Spec.Containers[0].ReadinessProbe.Handler.HTTPGet.Port = intstr.IntOrString{Type: intstr.String, StrVal: "test"}
|
||||
}
|
||||
for p, exp := range nodePortToHealthCheck {
|
||||
got, err := lbc.tr.GetProbe(p)
|
||||
if err != nil || got == nil {
|
||||
t.Errorf("Failed to get probe for node port %v: %v", p, err)
|
||||
} else if getProbePath(got) != exp {
|
||||
t.Errorf("Wrong path for node port %v, got %v expected %v", p, getProbePath(got), exp)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestProbeGetterCrossNamespace(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
|
||||
lbc := newLoadBalancerController(t, cm)
|
||||
|
||||
firstPod := &api_v1.Pod{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
// labels match those added by "addPods", but ns and health check
|
||||
// path is different. If this pod was created in the same ns, it
|
||||
// would become the health check.
|
||||
Labels: map[string]string{"app-3001": "test"},
|
||||
Name: fmt.Sprintf("test-pod-new-ns"),
|
||||
Namespace: "new-ns",
|
||||
CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
|
||||
},
|
||||
Spec: api_v1.PodSpec{
|
||||
Containers: []api_v1.Container{
|
||||
{
|
||||
Ports: []api_v1.ContainerPort{{ContainerPort: 80}},
|
||||
ReadinessProbe: &api_v1.Probe{
|
||||
Handler: api_v1.Handler{
|
||||
HTTPGet: &api_v1.HTTPGetAction{
|
||||
Scheme: api_v1.URISchemeHTTP,
|
||||
Path: "/badpath",
|
||||
Port: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
lbc.podLister.Indexer.Add(firstPod)
|
||||
nodePortToHealthCheck := map[backends.ServicePort]string{
|
||||
{Port: 3001, Protocol: utils.ProtocolHTTP}: "/healthz",
|
||||
}
|
||||
addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
|
||||
|
||||
for p, exp := range nodePortToHealthCheck {
|
||||
got, err := lbc.tr.GetProbe(p)
|
||||
if err != nil || got == nil {
|
||||
t.Errorf("Failed to get probe for node port %v: %v", p, err)
|
||||
} else if getProbePath(got) != exp {
|
||||
t.Errorf("Wrong path for node port %v, got %v expected %v", p, getProbePath(got), exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[backends.ServicePort]string, ns string) {
|
||||
delay := time.Minute
|
||||
for np, u := range nodePortToHealthCheck {
|
||||
l := map[string]string{fmt.Sprintf("app-%d", np.Port): "test"}
|
||||
svc := &api_v1.Service{
|
||||
Spec: api_v1.ServiceSpec{
|
||||
Selector: l,
|
||||
Ports: []api_v1.ServicePort{
|
||||
{
|
||||
NodePort: int32(np.Port),
|
||||
TargetPort: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
svc.Name = fmt.Sprintf("%d", np.Port)
|
||||
svc.Namespace = ns
|
||||
lbc.svcLister.Indexer.Add(svc)
|
||||
|
||||
pod := &api_v1.Pod{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Labels: l,
|
||||
Name: fmt.Sprintf("%d", np.Port),
|
||||
Namespace: ns,
|
||||
CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(delay)),
|
||||
},
|
||||
Spec: api_v1.PodSpec{
|
||||
Containers: []api_v1.Container{
|
||||
{
|
||||
Ports: []api_v1.ContainerPort{{Name: "test", ContainerPort: 80}},
|
||||
ReadinessProbe: &api_v1.Probe{
|
||||
Handler: api_v1.Handler{
|
||||
HTTPGet: &api_v1.HTTPGetAction{
|
||||
Scheme: api_v1.URIScheme(string(np.Protocol)),
|
||||
Path: u,
|
||||
Port: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
lbc.podLister.Indexer.Add(pod)
|
||||
delay = 2 * delay
|
||||
}
|
||||
}
|
||||
|
||||
func addNodes(lbc *LoadBalancerController, zoneToNode map[string][]string) {
|
||||
for zone, nodes := range zoneToNode {
|
||||
for _, node := range nodes {
|
||||
n := &api_v1.Node{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: node,
|
||||
Labels: map[string]string{
|
||||
zoneKey: zone,
|
||||
},
|
||||
},
|
||||
Status: api_v1.NodeStatus{
|
||||
Conditions: []api_v1.NodeCondition{
|
||||
{Type: api_v1.NodeReady, Status: api_v1.ConditionTrue},
|
||||
},
|
||||
},
|
||||
}
|
||||
lbc.nodeLister.Indexer.Add(n)
|
||||
}
|
||||
}
|
||||
lbc.CloudClusterManager.instancePool.Init(lbc.tr)
|
||||
}
|
||||
|
||||
func getProbePath(p *api_v1.Probe) string {
|
||||
return p.Handler.HTTPGet.Path
|
||||
}
|
||||
|
||||
func TestAddInstanceGroupsAnnotation(t *testing.T) {
|
||||
testCases := []struct {
|
||||
Igs []*compute.InstanceGroup
|
||||
ExpectedAnnotation string
|
||||
}{
|
||||
{
|
||||
// Single zone.
|
||||
[]*compute.InstanceGroup{&compute.InstanceGroup{
|
||||
Name: "ig-name",
|
||||
Zone: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b",
|
||||
}},
|
||||
`[{"Name":"ig-name","Zone":"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b"}]`,
|
||||
},
|
||||
{
|
||||
// Multiple zones.
|
||||
[]*compute.InstanceGroup{
|
||||
&compute.InstanceGroup{
|
||||
Name: "ig-name-1",
|
||||
Zone: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b",
|
||||
},
|
||||
&compute.InstanceGroup{
|
||||
Name: "ig-name-2",
|
||||
Zone: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a",
|
||||
},
|
||||
},
|
||||
`[{"Name":"ig-name-1","Zone":"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b"},{"Name":"ig-name-2","Zone":"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a"}]`,
|
||||
},
|
||||
}
|
||||
for _, c := range testCases {
|
||||
annotations := map[string]string{}
|
||||
err := setInstanceGroupsAnnotation(annotations, c.Igs)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
if annotations[instanceGroupsAnnotationKey] != c.ExpectedAnnotation {
|
||||
t.Fatalf("Unexpected annotation value: %s, expected: %s", annotations[instanceGroupsAnnotationKey], c.ExpectedAnnotation)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: my-echo-deploy
|
||||
spec:
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echo
|
||||
spec:
|
||||
containers:
|
||||
- name: echoserver
|
||||
image: nicksardo/echoserver:latest
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- name: echo-443
|
||||
containerPort: 443
|
||||
# readinessProbe: # Health check settings can be retrieved from an HTTPS readinessProbe as well
|
||||
# httpGet:
|
||||
# path: /healthcheck # Custom health check path for testing
|
||||
# scheme: HTTPS
|
||||
# port: echo-443
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: my-echo-svc
|
||||
annotations:
|
||||
service.alpha.kubernetes.io/app-protocols: '{"my-https-port":"HTTPS"}' # Must map port-name to HTTPS for the GCP ingress controller
|
||||
labels:
|
||||
app: echo
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 12345 # Port doesn't matter as nodeport is used for Ingress
|
||||
targetPort: echo-443
|
||||
protocol: TCP
|
||||
name: my-https-port
|
||||
selector:
|
||||
app: echo
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: my-echo-ingress
|
||||
spec:
|
||||
backend:
|
||||
serviceName: my-echo-svc
|
||||
servicePort: my-https-port
|
|
@ -1,72 +0,0 @@
|
|||
# Simple HTTP health check example
|
||||
|
||||
The GCE Ingress controller adopts the readiness probe from the matching endpoints, provided the readiness probe doesn't require special headers.
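For reference, a probe the controller can adopt is just a plain `httpGet` readiness probe on the serving port, roughly like the sketch below (the container name and port mirror the health_check_app.yaml manifest used in this example):

```yaml
containers:
- name: echoheaders
  image: gcr.io/google_containers/echoserver:1.8
  ports:
  - containerPort: 8080
  readinessProbe:
    httpGet:
      path: /healthz   # adopted as the GCE health check request path
      port: 8080
```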
|
||||
|
||||
Create the following app:
|
||||
```console
|
||||
$ kubectl create -f health_check_app.yaml
|
||||
replicationcontroller "echoheaders" created
|
||||
You have exposed your service on an external port on all nodes in your
|
||||
cluster. If you want to expose this service to the external internet, you may
|
||||
need to set up firewall rules for the service port(s) (tcp:31165) to serve traffic.
|
||||
|
||||
See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
|
||||
service "echoheadersx" created
|
||||
You have exposed your service on an external port on all nodes in your
|
||||
cluster. If you want to expose this service to the external internet, you may
|
||||
need to set up firewall rules for the service port(s) (tcp:31020) to serve traffic.
|
||||
|
||||
See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
|
||||
service "echoheadersy" created
|
||||
ingress "echomap" created
|
||||
```
|
||||
|
||||
You should soon find an Ingress that is backed by a GCE Loadbalancer.
|
||||
|
||||
```console
|
||||
$ kubectl describe ing echomap
|
||||
Name: echomap
|
||||
Namespace: default
|
||||
Address: 107.178.255.228
|
||||
Default backend: default-http-backend:80 (10.180.0.9:8080,10.240.0.2:8080)
|
||||
Rules:
|
||||
Host Path Backends
|
||||
---- ---- --------
|
||||
foo.bar.com
|
||||
/foo echoheadersx:80 (<none>)
|
||||
bar.baz.com
|
||||
/bar echoheadersy:80 (<none>)
|
||||
/foo echoheadersx:80 (<none>)
|
||||
Annotations:
|
||||
target-proxy: k8s-tp-default-echomap--a9d60e8176d933ee
|
||||
url-map: k8s-um-default-echomap--a9d60e8176d933ee
|
||||
backends: {"k8s-be-31020--a9d60e8176d933ee":"HEALTHY","k8s-be-31165--a9d60e8176d933ee":"HEALTHY","k8s-be-31686--a9d60e8176d933ee":"HEALTHY"}
|
||||
forwarding-rule: k8s-fw-default-echomap--a9d60e8176d933ee
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
17m 17m 1 {loadbalancer-controller } Normal ADD default/echomap
|
||||
15m 15m 1 {loadbalancer-controller } Normal CREATE ip: 107.178.255.228
|
||||
|
||||
$ curl 107.178.255.228/foo -H 'Host:foo.bar.com'
|
||||
CLIENT VALUES:
|
||||
client_address=10.240.0.5
|
||||
command=GET
|
||||
real path=/foo
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://foo.bar.com:8080/foo
|
||||
...
|
||||
```
|
||||
|
||||
You can confirm which health check endpoint it's using in one of two ways:
|
||||
* Through the cloud console: Compute > Health checks > look up your health check. It takes the form k8s-be-nodePort-hash, where the nodePorts in the example above are 31165 and 31020, as shown by the kubectl output.
|
||||
* Through gcloud: Run `gcloud compute http-health-checks list`
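For example, using the cluster hash from the `kubectl describe` output above, something like the following lists the relevant checks (output abridged and illustrative):

```console
$ gcloud compute http-health-checks list | grep a9d60e8176d933ee
k8s-be-31020--a9d60e8176d933ee ...
k8s-be-31165--a9d60e8176d933ee ...
```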
|
||||
|
||||
## Limitations
|
||||
|
||||
A few points to note:
|
||||
* The readiness probe must be exposed on the port matching the `servicePort` specified in the Ingress
|
||||
* The readiness probe cannot have special requirements like headers
|
||||
* The probe timeouts are translated to GCE health check timeouts
|
||||
* You must create the pods backing the endpoints with the given readiness probe. This *will not* work if you update the replication controller with a different readiness probe.
|
|
@ -1,100 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: echoheaders
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
containers:
|
||||
- name: echoheaders
|
||||
image: gcr.io/google_containers/echoserver:1.8
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
periodSeconds: 1
|
||||
timeoutSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 10
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersx
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersy
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: echomap
|
||||
spec:
|
||||
rules:
|
||||
- host: foo.bar.com
|
||||
http:
|
||||
paths:
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
- host: bar.baz.com
|
||||
http:
|
||||
paths:
|
||||
- path: /bar
|
||||
backend:
|
||||
serviceName: echoheadersy
|
||||
servicePort: 80
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
# Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
all:
|
||||
|
||||
KEY = /tmp/tls.key
|
||||
CERT = /tmp/tls.crt
|
||||
SECRET = /tmp/tls.json
|
||||
HOST=example.com
|
||||
NAME=tls-secret
|
||||
|
||||
keys:
|
||||
# The CName used here is specific to the service specified in nginx-app.yaml.
|
||||
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $(KEY) -out $(CERT) -subj "/CN=$(HOST)/O=$(HOST)"
|
||||
|
||||
secret:
|
||||
godep go run make_secret.go -crt $(CERT) -key $(KEY) -name $(NAME) > $(SECRET)
|
||||
|
||||
clean:
|
||||
rm $(KEY)
|
||||
rm $(CERT)
|
|
@ -1,20 +0,0 @@
|
|||
# Simple TLS example
|
||||
|
||||
Create secret
|
||||
```console
|
||||
$ make keys secret
|
||||
$ kubectl create -f /tmp/tls.json
|
||||
```
|
||||
|
||||
Make sure you have the l7 controller running:
|
||||
```console
|
||||
$ kubectl --namespace=kube-system get pod -l name=glbc
|
||||
NAME
|
||||
l7-lb-controller-v0.6.0-1770t ...
|
||||
```
|
||||
Also make sure you have a [firewall rule](https://github.com/kubernetes/ingress/blob/master/controllers/gce/BETA_LIMITATIONS.md#creating-the-fir-glbc-health-checks) for the node port of the Service.
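If that rule does not exist yet, a minimal version looks roughly like the sketch below (the rule name is arbitrary and 30000-32767 is the default Kubernetes nodePort range; the source ranges are the GCE L7 health-check ranges):

```console
$ gcloud compute firewall-rules create k8s-glbc-health-checks \
    --source-ranges=130.211.0.0/22,35.191.0.0/16 \
    --allow=tcp:30000-32767
```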
|
||||
|
||||
Create Ingress
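The part of tls-app.yaml that matters here is the `tls` section, which references the secret created above (sketch; the full manifest is in this directory):

```yaml
spec:
  tls:
  - secretName: tls-secret
  backend:
    serviceName: echoheaders-https
    servicePort: 80
```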
|
||||
```console
|
||||
$ kubectl create -f tls-app.yaml
|
||||
```
|
|
@ -1,78 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// A small script that converts the given OpenSSL public/private keys to
|
||||
// a secret that it writes to stdout as json. Most common use case is to
|
||||
// create a secret from self-signed certificates used to authenticate with
|
||||
// a devserver. Usage: go run make_secret.go -crt ca.crt -key priv.key > secret.json
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
registered "k8s.io/apimachinery/pkg/apimachinery/registered"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
// This installs the legacy v1 API
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
_ "k8s.io/kubernetes/pkg/api/install"
|
||||
)
|
||||
|
||||
// TODO:
|
||||
// Add a -o flag that writes to the specified destination file.
|
||||
// Teach the script to create crt and key if -crt and -key aren't specified.
|
||||
var (
|
||||
crt = flag.String("crt", "", "path to tls certificates.")
|
||||
key = flag.String("key", "", "path to tls private key.")
|
||||
name = flag.String("name", "tls-secret", "name of the secret.")
|
||||
)
|
||||
|
||||
func read(file string) []byte {
|
||||
b, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
log.Fatalf("Cannot read file %v, %v", file, err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *crt == "" || *key == "" {
|
||||
log.Fatalf("Need to specify -crt -key and -template")
|
||||
}
|
||||
tlsCrt := read(*crt)
|
||||
tlsKey := read(*key)
|
||||
secret := &api_v1.Secret{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: *name,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
api_v1.TLSCertKey: tlsCrt,
|
||||
api_v1.TLSPrivateKeyKey: tlsKey,
|
||||
},
|
||||
}
|
||||
|
||||
arm, err := registered.NewAPIRegistrationManager("")
|
||||
if err != nil {
|
||||
log.Fatalf("%v", err)
|
||||
}
|
||||
fmt.Printf(runtime.EncodeOrDie(api.Codecs.LegacyCodec(arm.EnabledVersions()...), secret))
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheaders-https
|
||||
labels:
|
||||
app: echoheaders-https
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders-https
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: echoheaders-https
|
||||
spec:
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echoheaders-https
|
||||
spec:
|
||||
containers:
|
||||
- name: echoheaders-https
|
||||
image: gcr.io/google_containers/echoserver:1.3
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: test
|
||||
spec:
|
||||
tls:
|
||||
# This assumes tls-secret exists.
|
||||
# To generate it run the make in this directory.
|
||||
- secretName: tls-secret
|
||||
backend:
|
||||
serviceName: echoheaders-https
|
||||
servicePort: 80
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
FROM alpine:3.5
|
||||
|
||||
COPY wsserver /wsserver
|
||||
|
||||
CMD ["/wsserver"]
|
|
@ -1,109 +0,0 @@
|
|||
# Simple Websocket Example
|
||||
|
||||
Any websocket server will suffice; however, for the purpose of demonstration, we'll use the gorilla/websocket package in a Go process.
|
||||
|
||||
### Build
|
||||
```shell
|
||||
➜ CGO_ENABLED=0 go build -o wsserver
|
||||
```
|
||||
|
||||
### Containerize
|
||||
```shell
|
||||
➜ docker build -t nicksardo/websocketexample .
|
||||
Sending build context to Docker daemon 6.134 MB
|
||||
Step 1 : FROM alpine:3.5
|
||||
---> 4a415e366388
|
||||
Step 2 : COPY wsserver /wsserver
|
||||
---> 8002887d752d
|
||||
Removing intermediate container 7772a3e76155
|
||||
Step 3 : CMD /wsserver
|
||||
---> Running in 27c8ff226267
|
||||
---> eecd0574e5d1
|
||||
Removing intermediate container 27c8ff226267
|
||||
Successfully built eecd0574e5d1
|
||||
|
||||
➜ docker push nicksardo/websocketexample:latest
|
||||
...
|
||||
```
|
||||
|
||||
### Deploy
|
||||
Either update the image in the `Deployment` to your newly created image, or continue using `nicksardo/websocketexample`.
|
||||
```shell
|
||||
➜ vi deployment.yaml
|
||||
# Change image to your own
|
||||
```
|
||||
|
||||
```shell
|
||||
➜ kubectl create -f deployment.yaml
|
||||
deployment "ws-example" created
|
||||
service "ws-example-svc" created
|
||||
ingress "ws-example-ing" created
|
||||
|
||||
```
|
||||
|
||||
### Test
|
||||
Retrieve the ingress external IP:
|
||||
```shell
|
||||
➜ kubectl get ing/ws-example-ing
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
ws-example-ing * xxx.xxx.xxx.xxx 80 3m
|
||||
```
|
||||
|
||||
Wait for the loadbalancer to be created and functioning. When you receive a successful response, you can proceed.
|
||||
```
|
||||
➜ curl http://xxx.xxx.xxx.xxx
|
||||
Websocket example. Connect to /ws%
|
||||
```
|
||||
|
||||
The binary we deployed does not serve any HTML/JavaScript to demonstrate the websocket, so we'll use websocket.org's client.
|
||||
|
||||
Visit http://www.websocket.org/echo.html. It's important to use `HTTP` instead of `HTTPS` since we assembled an `HTTP` load balancer. Browsers may prevent `HTTP` websocket connections as a security feature.
|
||||
Set the `Location` to
|
||||
```
|
||||
ws://xxx.xxx.xxx.xxx/ws
|
||||
```
|
||||
Click 'Connect' and you should see messages received from server:
|
||||

|
||||
|
||||
|
||||
### Change backend timeout
|
||||
|
||||
At this point, the websocket connection will be destroyed by the HTTP(S) Load Balancer after 30 seconds, which is the default timeout. Note: this timeout is not an idle timeout - it's a timeout on the connection lifetime.
|
||||
|
||||
Currently, the GCE ingress controller does not provide a way to set this timeout via the Ingress specification. You'll need to change this value either through the GCP Cloud Console or through the gcloud CLI.
|
||||
|
||||
|
||||
```shell
|
||||
➜ kubectl describe ingress/ws-example-ing
|
||||
Name: ws-example-ing
|
||||
Namespace: default
|
||||
Address: xxxxxxxxxxxx
|
||||
Default backend: ws-example-svc:80 (10.48.10.12:8080,10.48.5.14:8080,10.48.7.11:8080)
|
||||
Rules:
|
||||
Host Path Backends
|
||||
---- ---- --------
|
||||
* * ws-example-svc:80 (10.48.10.12:8080,10.48.5.14:8080,10.48.7.11:8080)
|
||||
Annotations:
|
||||
target-proxy: k8s-tp-default-ws-example-ing--52aa8ae8221ffa9c
|
||||
url-map: k8s-um-default-ws-example-ing--52aa8ae8221ffa9c
|
||||
backends: {"k8s-be-31127--52aa8ae8221ffa9c":"HEALTHY"}
|
||||
forwarding-rule: k8s-fw-default-ws-example-ing--52aa8ae8221ffa9c
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
12m 12m 1 loadbalancer-controller Normal ADD default/ws-example-ing
|
||||
11m 11m 1 loadbalancer-controller Normal CREATE ip: xxxxxxxxxxxx
|
||||
11m 9m 5 loadbalancer-controller Normal Service default backend set to ws-example-svc:31127
|
||||
```
|
||||
|
||||
Retrieve the name of the backend service from within the annotation section.
|
||||
|
||||
Update the timeout field for every backend that needs a higher timeout.
|
||||
|
||||
```shell
|
||||
➜ export BACKEND=k8s-be-31127--52aa8ae8221ffa9c
|
||||
➜ gcloud compute backend-services update $BACKEND --global --timeout=86400 # seconds
|
||||
Updated [https://www.googleapis.com/compute/v1/projects/xxxxxxxxx/global/backendServices/k8s-be-31127--52aa8ae8221ffa9c].
|
||||
```
|
||||
|
||||
Wait up to twenty minutes for this change to propagate.
|
|
@ -1,47 +0,0 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ws-example
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: wseg
|
||||
spec:
|
||||
containers:
|
||||
- name: websocketexample
|
||||
image: nicksardo/websocketexample
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
env:
|
||||
- name: podname
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ws-example-svc
|
||||
labels:
|
||||
app: wseg
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: wseg
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: ws-example-ing
|
||||
spec:
|
||||
backend:
|
||||
serviceName: ws-example-svc
|
||||
servicePort: 80
|
|
@ -1,81 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
var podName string
|
||||
var upgrader = websocket.Upgrader{
|
||||
CheckOrigin: func(r *http.Request) bool {
|
||||
return true // Ignore http origin
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
podName = os.Getenv("podname")
|
||||
}
|
||||
|
||||
func ws(w http.ResponseWriter, r *http.Request) {
|
||||
log.Println("Received request", r.RemoteAddr)
|
||||
c, err := upgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
log.Println("failed to upgrade:", err)
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
s := fmt.Sprintf("Connected to %v", podName)
|
||||
c.WriteMessage(websocket.TextMessage, []byte(s))
|
||||
handleWSConn(c)
|
||||
}
|
||||
|
||||
func handleWSConn(c *websocket.Conn) {
|
||||
stop := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
s := fmt.Sprintf("%s reports time: %v", podName, time.Now().String())
|
||||
c.WriteMessage(websocket.TextMessage, []byte(s))
|
||||
}
|
||||
}()
|
||||
for {
|
||||
mt, message, err := c.ReadMessage()
|
||||
if err != nil {
|
||||
log.Println("Error while reading:", err)
|
||||
break
|
||||
}
|
||||
if err = c.WriteMessage(mt, message); err != nil {
|
||||
log.Println("Error while writing:", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
close(stop)
|
||||
}
|
||||
|
||||
func root(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
w.Write([]byte(`Websocket example. Connect to /ws`))
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Println("Starting")
|
||||
http.HandleFunc("/ws", ws)
|
||||
http.HandleFunc("/", root)
|
||||
log.Fatal(http.ListenAndServe(":8080", nil))
|
||||
}
|
|
@ -1,83 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package firewalls
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
type fakeFirewallsProvider struct {
|
||||
fw map[string]*compute.Firewall
|
||||
networkUrl string
|
||||
}
|
||||
|
||||
// NewFakeFirewallsProvider creates a fake for firewall rules.
|
||||
func NewFakeFirewallsProvider() *fakeFirewallsProvider {
|
||||
return &fakeFirewallsProvider{
|
||||
fw: make(map[string]*compute.Firewall),
|
||||
}
|
||||
}
|
||||
|
||||
func (ff *fakeFirewallsProvider) GetFirewall(name string) (*compute.Firewall, error) {
|
||||
rule, exists := ff.fw[name]
|
||||
if exists {
|
||||
return rule, nil
|
||||
}
|
||||
return nil, utils.FakeGoogleAPINotFoundErr()
|
||||
}
|
||||
|
||||
func (ff *fakeFirewallsProvider) CreateFirewall(f *compute.Firewall) error {
|
||||
if _, exists := ff.fw[f.Name]; exists {
|
||||
return fmt.Errorf("firewall rule %v already exists", f.Name)
|
||||
}
|
||||
ff.fw[f.Name] = f
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ff *fakeFirewallsProvider) DeleteFirewall(name string) error {
|
||||
// We need the full name for the same reason as CreateFirewall.
|
||||
_, exists := ff.fw[name]
|
||||
if !exists {
|
||||
return utils.FakeGoogleAPINotFoundErr()
|
||||
}
|
||||
|
||||
delete(ff.fw, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ff *fakeFirewallsProvider) UpdateFirewall(f *compute.Firewall) error {
|
||||
// We need the full name for the same reason as CreateFirewall.
|
||||
_, exists := ff.fw[f.Name]
|
||||
if !exists {
|
||||
return fmt.Errorf("update failed for rule %v, srcRange %v ports %+v, rule not found", f.Name, f.SourceRanges, f.Allowed)
|
||||
}
|
||||
|
||||
ff.fw[f.Name] = f
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ff *fakeFirewallsProvider) NetworkURL() string {
|
||||
return ff.networkUrl
|
||||
}
|
||||
|
||||
func (ff *fakeFirewallsProvider) GetNodeTags(nodeNames []string) ([]string, error) {
|
||||
return nodeNames, nil
|
||||
}
|
|
@ -1,143 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package firewalls
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
netset "k8s.io/kubernetes/pkg/util/net/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// Src ranges from which the GCE L7 performs health checks.
|
||||
var l7SrcRanges = []string{"130.211.0.0/22", "35.191.0.0/16"}
|
||||
|
||||
// FirewallRules manages firewall rules.
|
||||
type FirewallRules struct {
|
||||
cloud Firewall
|
||||
namer *utils.Namer
|
||||
srcRanges []string
|
||||
}
|
||||
|
||||
// NewFirewallPool creates a new firewall rule manager.
|
||||
// cloud: the cloud object implementing Firewall.
|
||||
// namer: cluster namer.
|
||||
func NewFirewallPool(cloud Firewall, namer *utils.Namer) SingleFirewallPool {
|
||||
_, err := netset.ParseIPNets(l7SrcRanges...)
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not parse L7 src ranges %v for firewall rule: %v", l7SrcRanges, err)
|
||||
}
|
||||
return &FirewallRules{cloud: cloud, namer: namer, srcRanges: l7SrcRanges}
|
||||
}
|
||||
|
||||
// Sync syncs firewall rules with the cloud.
|
||||
func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error {
|
||||
if len(nodePorts) == 0 {
|
||||
return fr.Shutdown()
|
||||
}
|
||||
// Firewall rule prefix must match that inserted by the gce library.
|
||||
suffix := fr.namer.FrSuffix()
|
||||
// TODO: Fix upstream gce cloudprovider lib so GET also takes the suffix
|
||||
// instead of the whole name.
|
||||
name := fr.namer.FrName(suffix)
|
||||
rule, _ := fr.cloud.GetFirewall(name)
|
||||
|
||||
firewall, err := fr.createFirewallObject(name, "GCE L7 firewall rule", nodePorts, nodeNames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rule == nil {
|
||||
glog.Infof("Creating global l7 firewall rule %v", name)
|
||||
return fr.cloud.CreateFirewall(firewall)
|
||||
}
|
||||
|
||||
requiredPorts := sets.NewString()
|
||||
for _, p := range nodePorts {
|
||||
requiredPorts.Insert(strconv.Itoa(int(p)))
|
||||
}
|
||||
existingPorts := sets.NewString()
|
||||
for _, allowed := range rule.Allowed {
|
||||
for _, p := range allowed.Ports {
|
||||
existingPorts.Insert(p)
|
||||
}
|
||||
}
|
||||
|
||||
requiredCIDRs := sets.NewString(l7SrcRanges...)
|
||||
existingCIDRs := sets.NewString(rule.SourceRanges...)
|
||||
|
||||
// Skip the update if both the ports and the source CIDRs are already up to date.
|
||||
// NOTE: We are not checking if nodeNames matches the firewall targetTags
|
||||
if requiredPorts.Equal(existingPorts) && requiredCIDRs.Equal(existingCIDRs) {
|
||||
glog.V(4).Info("Firewall does not need update of ports or source ranges")
|
||||
return nil
|
||||
}
|
||||
glog.V(3).Infof("Firewall %v already exists, updating nodeports %v", name, nodePorts)
|
||||
return fr.cloud.UpdateFirewall(firewall)
|
||||
}
|
||||
|
||||
// Shutdown shuts down this firewall rules manager.
|
||||
func (fr *FirewallRules) Shutdown() error {
|
||||
name := fr.namer.FrName(fr.namer.FrSuffix())
|
||||
glog.Infof("Deleting firewall %v", name)
|
||||
err := fr.cloud.DeleteFirewall(name)
|
||||
if err != nil && utils.IsHTTPErrorCode(err, 404) {
|
||||
glog.Infof("Firewall with name %v didn't exist at Shutdown", name)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetFirewall just returns the firewall object corresponding to the given name.
|
||||
// TODO: Currently only used in testing. Modify so we don't leak compute
|
||||
// objects out of this interface by returning just the (src, ports, error).
|
||||
func (fr *FirewallRules) GetFirewall(name string) (*compute.Firewall, error) {
|
||||
return fr.cloud.GetFirewall(name)
|
||||
}
|
||||
|
||||
func (fr *FirewallRules) createFirewallObject(firewallName, description string, nodePorts []int64, nodeNames []string) (*compute.Firewall, error) {
|
||||
ports := make([]string, len(nodePorts))
|
||||
for ix := range nodePorts {
|
||||
ports[ix] = strconv.Itoa(int(nodePorts[ix]))
|
||||
}
|
||||
|
||||
// If the node tags to be used for this cluster have been predefined in the
|
||||
// provider config, just use them. Otherwise, invoke computeHostTags method to get the tags.
|
||||
targetTags, err := fr.cloud.GetNodeTags(nodeNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &compute.Firewall{
|
||||
Name: firewallName,
|
||||
Description: description,
|
||||
SourceRanges: fr.srcRanges,
|
||||
Network: fr.cloud.NetworkURL(),
|
||||
Allowed: []*compute.FirewallAllowed{
|
||||
{
|
||||
IPProtocol: "tcp",
|
||||
Ports: ports,
|
||||
},
|
||||
},
|
||||
TargetTags: targetTags,
|
||||
}, nil
|
||||
}
|
|
@ -1,111 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package firewalls
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
func TestSyncFirewallPool(t *testing.T) {
|
||||
namer := utils.NewNamer("ABC", "XYZ")
|
||||
fwp := NewFakeFirewallsProvider()
|
||||
fp := NewFirewallPool(fwp, namer)
|
||||
ruleName := namer.FrName(namer.FrSuffix())
|
||||
|
||||
// Test creating a firewall rule via Sync
|
||||
nodePorts := []int64{80, 443, 3000}
|
||||
nodes := []string{"node-a", "node-b", "node-c"}
|
||||
err := fp.Sync(nodePorts, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err when syncing firewall, err: %v", err)
|
||||
}
|
||||
verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
|
||||
|
||||
// Sync to fewer ports
|
||||
nodePorts = []int64{80, 443}
|
||||
err = fp.Sync(nodePorts, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err when syncing firewall, err: %v", err)
|
||||
}
|
||||
verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
|
||||
|
||||
firewall, err := fp.(*FirewallRules).createFirewallObject(namer.FrName(namer.FrSuffix()), "", nodePorts, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err when creating firewall object, err: %v", err)
|
||||
}
|
||||
|
||||
err = fwp.UpdateFirewall(firewall)
|
||||
if err != nil {
|
||||
t.Errorf("failed to update firewall rule, err: %v", err)
|
||||
}
|
||||
verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
|
||||
|
||||
// Run Sync and expect l7 src ranges to be returned
|
||||
err = fp.Sync(nodePorts, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err when syncing firewall, err: %v", err)
|
||||
}
|
||||
verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
|
||||
|
||||
// Add node and expect firewall to remain the same
|
||||
// NOTE: See computeHostTag(..) in gce cloudprovider
|
||||
nodes = []string{"node-a", "node-b", "node-c", "node-d"}
|
||||
err = fp.Sync(nodePorts, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err when syncing firewall, err: %v", err)
|
||||
}
|
||||
verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
|
||||
|
||||
// Remove all ports and expect firewall rule to disappear
|
||||
nodePorts = []int64{}
|
||||
err = fp.Sync(nodePorts, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err when syncing firewall, err: %v", err)
|
||||
}
|
||||
|
||||
err = fp.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err when deleting firewall, err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func verifyFirewallRule(fwp *fakeFirewallsProvider, ruleName string, expectedPorts []int64, expectedNodes, expectedCIDRs []string, t *testing.T) {
|
||||
var strPorts []string
|
||||
for _, v := range expectedPorts {
|
||||
strPorts = append(strPorts, strconv.FormatInt(v, 10))
|
||||
}
|
||||
|
||||
// Verify firewall rule was created
|
||||
f, err := fwp.GetFirewall(ruleName)
|
||||
if err != nil {
|
||||
t.Errorf("could not retrieve firewall via cloud api, err %v", err)
|
||||
}
|
||||
|
||||
// Verify firewall rule has correct ports
|
||||
if !sets.NewString(f.Allowed[0].Ports...).Equal(sets.NewString(strPorts...)) {
|
||||
t.Errorf("allowed ports doesn't equal expected ports, Actual: %v, Expected: %v", f.Allowed[0].Ports, strPorts)
|
||||
}
|
||||
|
||||
// Verify firewall rule has correct CIDRs
|
||||
if !sets.NewString(f.SourceRanges...).Equal(sets.NewString(expectedCIDRs...)) {
|
||||
t.Errorf("source CIDRs doesn't equal expected CIDRs. Actual: %v, Expected: %v", f.SourceRanges, expectedCIDRs)
|
||||
}
|
||||
}
|
|
@@ -1,40 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package firewalls
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// SingleFirewallPool syncs the firewall rule for L7 traffic.
|
||||
type SingleFirewallPool interface {
|
||||
// TODO: Take a list of node ports for the firewall.
|
||||
Sync(nodePorts []int64, nodeNames []string) error
|
||||
Shutdown() error
|
||||
}
|
||||
|
||||
// Firewall interfaces with the GCE firewall api.
|
||||
// This interface is a little different from the rest because it dovetails into
|
||||
// the same firewall methods used by the TCPLoadBalancer.
|
||||
type Firewall interface {
|
||||
CreateFirewall(f *compute.Firewall) error
|
||||
GetFirewall(name string) (*compute.Firewall, error)
|
||||
DeleteFirewall(name string) error
|
||||
UpdateFirewall(f *compute.Firewall) error
|
||||
GetNodeTags(nodeNames []string) ([]string, error)
|
||||
NetworkURL() string
|
||||
}
|
|
@@ -1,114 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthchecks
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func fakeNotFoundErr() *googleapi.Error {
|
||||
return &googleapi.Error{Code: 404}
|
||||
}
|
||||
|
||||
// NewFakeHealthCheckProvider returns a new FakeHealthChecks.
|
||||
func NewFakeHealthCheckProvider() *FakeHealthCheckProvider {
|
||||
return &FakeHealthCheckProvider{
|
||||
http: make(map[string]compute.HttpHealthCheck),
|
||||
generic: make(map[string]compute.HealthCheck),
|
||||
}
|
||||
}
|
||||
|
||||
// FakeHealthCheckProvider fakes out health checks.
|
||||
type FakeHealthCheckProvider struct {
|
||||
http map[string]compute.HttpHealthCheck
|
||||
generic map[string]compute.HealthCheck
|
||||
}
|
||||
|
||||
// CreateHttpHealthCheck fakes out http health check creation.
|
||||
func (f *FakeHealthCheckProvider) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
|
||||
v := *hc
|
||||
v.SelfLink = "https://fake.google.com/compute/httpHealthChecks/" + hc.Name
|
||||
f.http[hc.Name] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetHttpHealthCheck fakes out getting a http health check from the cloud.
|
||||
func (f *FakeHealthCheckProvider) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) {
|
||||
if hc, found := f.http[name]; found {
|
||||
return &hc, nil
|
||||
}
|
||||
|
||||
return nil, fakeNotFoundErr()
|
||||
}
|
||||
|
||||
// DeleteHttpHealthCheck fakes out deleting a http health check.
|
||||
func (f *FakeHealthCheckProvider) DeleteHttpHealthCheck(name string) error {
|
||||
if _, exists := f.http[name]; !exists {
|
||||
return fakeNotFoundErr()
|
||||
}
|
||||
|
||||
delete(f.http, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateHttpHealthCheck sends the given health check as an update.
|
||||
func (f *FakeHealthCheckProvider) UpdateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
|
||||
if _, exists := f.http[hc.Name]; !exists {
|
||||
return fakeNotFoundErr()
|
||||
}
|
||||
|
||||
f.http[hc.Name] = *hc
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateHealthCheck fakes out health check creation.
|
||||
func (f *FakeHealthCheckProvider) CreateHealthCheck(hc *compute.HealthCheck) error {
|
||||
v := *hc
|
||||
v.SelfLink = "https://fake.google.com/compute/healthChecks/" + hc.Name
|
||||
f.generic[hc.Name] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetHealthCheck fakes out getting a health check from the cloud.
|
||||
func (f *FakeHealthCheckProvider) GetHealthCheck(name string) (*compute.HealthCheck, error) {
|
||||
if hc, found := f.generic[name]; found {
|
||||
return &hc, nil
|
||||
}
|
||||
|
||||
return nil, fakeNotFoundErr()
|
||||
}
|
||||
|
||||
// DeleteHealthCheck fakes out deleting a health check.
|
||||
func (f *FakeHealthCheckProvider) DeleteHealthCheck(name string) error {
|
||||
if _, exists := f.generic[name]; !exists {
|
||||
return fakeNotFoundErr()
|
||||
}
|
||||
|
||||
delete(f.generic, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateHealthCheck sends the given health check as an update.
|
||||
func (f *FakeHealthCheckProvider) UpdateHealthCheck(hc *compute.HealthCheck) error {
|
||||
if _, exists := f.generic[hc.Name]; !exists {
|
||||
return fakeNotFoundErr()
|
||||
}
|
||||
|
||||
f.generic[hc.Name] = *hc
|
||||
return nil
|
||||
}
|
|
@@ -1,223 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthchecks
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
// These values set a low health threshold and a high failure threshold.
|
||||
// We're just trying to detect if the node networking is
|
||||
// borked; service-level outages will get detected sooner
|
||||
// by kube-proxy.
|
||||
// DefaultHealthCheckInterval defines how frequently a probe runs
|
||||
DefaultHealthCheckInterval = 60 * time.Second
|
||||
// DefaultHealthyThreshold defines the threshold of success probes that declare a backend "healthy"
|
||||
DefaultHealthyThreshold = 1
|
||||
// DefaultUnhealthyThreshold defines the threshold of failure probes that declare a backend "unhealthy"
|
||||
DefaultUnhealthyThreshold = 10
|
||||
// DefaultTimeout defines the timeout of each probe
|
||||
DefaultTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
// HealthChecks manages health checks.
|
||||
type HealthChecks struct {
|
||||
cloud HealthCheckProvider
|
||||
defaultPath string
|
||||
namer *utils.Namer
|
||||
}
|
||||
|
||||
// NewHealthChecker creates a new health checker.
|
||||
// cloud: the cloud object implementing HealthCheckProvider.
|
||||
// defaultHealthCheckPath: the HTTP path to use for health checks.
|
||||
func NewHealthChecker(cloud HealthCheckProvider, defaultHealthCheckPath string, namer *utils.Namer) HealthChecker {
|
||||
return &HealthChecks{cloud, defaultHealthCheckPath, namer}
|
||||
}
|
||||
|
||||
// New returns a *HealthCheck with default settings and specified port/protocol
|
||||
func (h *HealthChecks) New(port int64, protocol utils.AppProtocol) *HealthCheck {
|
||||
hc := DefaultHealthCheck(port, protocol)
|
||||
hc.Name = h.namer.BeName(port)
|
||||
return hc
|
||||
}
|
||||
|
||||
// Sync retrieves a health check based on port, checks type and settings and updates/creates if necessary.
|
||||
// Sync is only called by the backends.Add func - it's not a pool like other resources.
|
||||
func (h *HealthChecks) Sync(hc *HealthCheck) (string, error) {
|
||||
// Verify default path
|
||||
if hc.RequestPath == "" {
|
||||
hc.RequestPath = h.defaultPath
|
||||
}
|
||||
|
||||
existingHC, err := h.Get(hc.Port)
|
||||
if err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Creating health check for port %v with protocol %v", hc.Port, hc.Type)
|
||||
if err = h.cloud.CreateHealthCheck(hc.ToComputeHealthCheck()); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return h.getHealthCheckLink(hc.Port)
|
||||
}
|
||||
|
||||
if existingHC.Protocol() != hc.Protocol() {
|
||||
glog.V(2).Infof("Updating health check %v because it has protocol %v but need %v", existingHC.Name, existingHC.Type, hc.Type)
|
||||
err = h.cloud.UpdateHealthCheck(hc.ToComputeHealthCheck())
|
||||
return existingHC.SelfLink, err
|
||||
}
|
||||
|
||||
if existingHC.RequestPath != hc.RequestPath {
|
||||
// TODO: reconcile health checks, and compare headers interval etc.
|
||||
// Currently Ingress doesn't expose all the health check params
|
||||
// natively, so some users prefer to hand modify the check.
|
||||
glog.V(2).Infof("Unexpected request path on health check %v, has %v want %v, NOT reconciling", hc.Name, existingHC.RequestPath, hc.RequestPath)
|
||||
} else {
|
||||
glog.V(2).Infof("Health check %v already exists and has the expected path %v", hc.Name, hc.RequestPath)
|
||||
}
|
||||
|
||||
return existingHC.SelfLink, nil
|
||||
}
|
||||
|
||||
func (h *HealthChecks) getHealthCheckLink(port int64) (string, error) {
|
||||
hc, err := h.Get(port)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hc.SelfLink, nil
|
||||
}
|
||||
|
||||
// Delete deletes the health check by port.
|
||||
func (h *HealthChecks) Delete(port int64) error {
|
||||
name := h.namer.BeName(port)
|
||||
glog.V(2).Infof("Deleting health check %v", name)
|
||||
return h.cloud.DeleteHealthCheck(name)
|
||||
}
|
||||
|
||||
// Get returns the health check by port
|
||||
func (h *HealthChecks) Get(port int64) (*HealthCheck, error) {
|
||||
name := h.namer.BeName(port)
|
||||
hc, err := h.cloud.GetHealthCheck(name)
|
||||
return NewHealthCheck(hc), err
|
||||
}
|
||||
|
||||
// GetLegacy returns the legacy HTTP health check for the given port
|
||||
func (h *HealthChecks) GetLegacy(port int64) (*compute.HttpHealthCheck, error) {
|
||||
name := h.namer.BeName(port)
|
||||
return h.cloud.GetHttpHealthCheck(name)
|
||||
}
|
||||
|
||||
// DeleteLegacy deletes legacy HTTP health checks
|
||||
func (h *HealthChecks) DeleteLegacy(port int64) error {
|
||||
name := h.namer.BeName(port)
|
||||
glog.V(2).Infof("Deleting legacy HTTP health check %v", name)
|
||||
return h.cloud.DeleteHttpHealthCheck(name)
|
||||
}
|
||||
|
||||
// DefaultHealthCheck simply returns the default health check.
|
||||
func DefaultHealthCheck(port int64, protocol utils.AppProtocol) *HealthCheck {
|
||||
httpSettings := compute.HTTPHealthCheck{
|
||||
Port: port,
|
||||
// Empty string is used as a signal to the caller to use the appropriate
|
||||
// default.
|
||||
RequestPath: "",
|
||||
}
|
||||
|
||||
hcSettings := compute.HealthCheck{
|
||||
// How often to health check.
|
||||
CheckIntervalSec: int64(DefaultHealthCheckInterval.Seconds()),
|
||||
// How long to wait before claiming failure of a health check.
|
||||
TimeoutSec: int64(DefaultTimeout.Seconds()),
|
||||
// Number of healthchecks to pass for a vm to be deemed healthy.
|
||||
HealthyThreshold: DefaultHealthyThreshold,
|
||||
// Number of healthchecks to fail before the vm is deemed unhealthy.
|
||||
UnhealthyThreshold: DefaultUnhealthyThreshold,
|
||||
Description: "Default kubernetes L7 Loadbalancing health check.",
|
||||
Type: string(protocol),
|
||||
}
|
||||
|
||||
return &HealthCheck{
|
||||
HTTPHealthCheck: httpSettings,
|
||||
HealthCheck: hcSettings,
|
||||
}
|
||||
}
|
||||
|
||||
// HealthCheck embeds two types - the generic healthcheck compute.HealthCheck
|
||||
// and the HTTP settings compute.HTTPHealthCheck. By embedding both, consumers can modify
|
||||
// all relevant settings (HTTP specific and HealthCheck generic) regardless of Type
|
||||
// Consumers should call the ToComputeHealthCheck() func to generate a compute.HealthCheck
|
||||
// with the proper child struct (.HttpHealthCheck, .HttpsHealthCheck, etc.).
|
||||
type HealthCheck struct {
|
||||
compute.HTTPHealthCheck
|
||||
compute.HealthCheck
|
||||
}
|
||||
|
||||
// NewHealthCheck creates a HealthCheck which abstracts nested structs away
|
||||
func NewHealthCheck(hc *compute.HealthCheck) *HealthCheck {
|
||||
if hc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
v := &HealthCheck{HealthCheck: *hc}
|
||||
switch utils.AppProtocol(hc.Type) {
|
||||
case utils.ProtocolHTTP:
|
||||
v.HTTPHealthCheck = *hc.HttpHealthCheck
|
||||
case utils.ProtocolHTTPS:
|
||||
// HTTPHealthCheck and HTTPSHealthChecks have identical fields
|
||||
v.HTTPHealthCheck = compute.HTTPHealthCheck(*hc.HttpsHealthCheck)
|
||||
}
|
||||
|
||||
// Users should be modifying HTTP(S) specific settings on the embedded
|
||||
// HTTPHealthCheck. Setting these to nil to prevent confusion.
|
||||
v.HealthCheck.HttpHealthCheck = nil
|
||||
v.HealthCheck.HttpsHealthCheck = nil
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Protocol returns the type cased to AppProtocol
|
||||
func (hc *HealthCheck) Protocol() utils.AppProtocol {
|
||||
return utils.AppProtocol(hc.Type)
|
||||
}
|
||||
|
||||
// ToComputeHealthCheck returns a valid compute.HealthCheck object
|
||||
func (hc *HealthCheck) ToComputeHealthCheck() *compute.HealthCheck {
|
||||
// Zeroing out child settings as a precaution. GoogleAPI throws an error
|
||||
// if the wrong child struct is set.
|
||||
hc.HealthCheck.HttpsHealthCheck = nil
|
||||
hc.HealthCheck.HttpHealthCheck = nil
|
||||
|
||||
switch hc.Protocol() {
|
||||
case utils.ProtocolHTTP:
|
||||
hc.HealthCheck.HttpHealthCheck = &hc.HTTPHealthCheck
|
||||
case utils.ProtocolHTTPS:
|
||||
https := compute.HTTPSHealthCheck(hc.HTTPHealthCheck)
|
||||
hc.HealthCheck.HttpsHealthCheck = &https
|
||||
}
|
||||
|
||||
return &hc.HealthCheck
|
||||
}
|
|
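A short sketch of the embedding described above: HTTP-level settings are edited once on the embedded HTTPHealthCheck, and ToComputeHealthCheck copies them into whichever child struct matches the Type. The helper names come from the removed healthchecks.go; the import paths and the request path value are illustrative assumptions.

package example

import (
	"fmt"

	"k8s.io/ingress/controllers/gce/healthchecks"
	"k8s.io/ingress/controllers/gce/utils"
)

// buildHTTPSCheck shows default settings plus one HTTP-level override being
// folded into the HTTPS child struct.
func buildHTTPSCheck(port int64) {
	hc := healthchecks.DefaultHealthCheck(port, utils.ProtocolHTTPS)
	// RequestPath lives on the embedded HTTPHealthCheck regardless of Type.
	hc.RequestPath = "/healthz"

	// Because Type is HTTPS, the HTTP settings are copied into HttpsHealthCheck
	// and HttpHealthCheck stays nil.
	computeHC := hc.ToComputeHealthCheck()
	fmt.Println(computeHC.Type, computeHC.HttpsHealthCheck.RequestPath)
}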
@@ -1,186 +0,0 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthchecks
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
func TestHealthCheckAdd(t *testing.T) {
|
||||
namer := utils.NewNamer("ABC", "XYZ")
|
||||
hcp := NewFakeHealthCheckProvider()
|
||||
healthChecks := NewHealthChecker(hcp, "/", namer)
|
||||
|
||||
hc := healthChecks.New(80, utils.ProtocolHTTP)
|
||||
_, err := healthChecks.Sync(hc)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// Verify the health check exists
|
||||
_, err = hcp.GetHealthCheck(namer.BeName(80))
|
||||
if err != nil {
|
||||
t.Fatalf("expected the health check to exist, err: %v", err)
|
||||
}
|
||||
|
||||
hc = healthChecks.New(443, utils.ProtocolHTTPS)
|
||||
_, err = healthChecks.Sync(hc)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// Verify the health check exists
|
||||
_, err = hcp.GetHealthCheck(namer.BeName(443))
|
||||
if err != nil {
|
||||
t.Fatalf("expected the health check to exist, err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthCheckAddExisting(t *testing.T) {
|
||||
namer := &utils.Namer{}
|
||||
hcp := NewFakeHealthCheckProvider()
|
||||
healthChecks := NewHealthChecker(hcp, "/", namer)
|
||||
|
||||
// HTTP
|
||||
// Manually insert a health check
|
||||
httpHC := DefaultHealthCheck(3000, utils.ProtocolHTTP)
|
||||
httpHC.Name = namer.BeName(3000)
|
||||
httpHC.RequestPath = "/my-probes-health"
|
||||
hcp.CreateHealthCheck(httpHC.ToComputeHealthCheck())
|
||||
|
||||
// Should not fail adding the same type of health check
|
||||
hc := healthChecks.New(3000, utils.ProtocolHTTP)
|
||||
_, err := healthChecks.Sync(hc)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// Verify the health check exists
|
||||
_, err = hcp.GetHealthCheck(httpHC.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("expected the health check to continue existing, err: %v", err)
|
||||
}
|
||||
|
||||
// HTTPS
|
||||
// Manually insert a health check
|
||||
httpsHC := DefaultHealthCheck(4000, utils.ProtocolHTTPS)
|
||||
httpsHC.Name = namer.BeName(4000)
|
||||
httpsHC.RequestPath = "/my-probes-health"
|
||||
hcp.CreateHealthCheck(httpsHC.ToComputeHealthCheck())
|
||||
|
||||
hc = healthChecks.New(4000, utils.ProtocolHTTPS)
|
||||
_, err = healthChecks.Sync(hc)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
// Verify the health check exists
|
||||
_, err = hcp.GetHealthCheck(httpsHC.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("expected the health check to continue existing, err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthCheckDelete(t *testing.T) {
|
||||
namer := &utils.Namer{}
|
||||
hcp := NewFakeHealthCheckProvider()
|
||||
healthChecks := NewHealthChecker(hcp, "/", namer)
|
||||
|
||||
// Create HTTP HC for 1234
|
||||
hc := DefaultHealthCheck(1234, utils.ProtocolHTTP)
|
||||
hc.Name = namer.BeName(1234)
|
||||
hcp.CreateHealthCheck(hc.ToComputeHealthCheck())
|
||||
|
||||
// Create HTTPS HC for 1234
|
||||
hc.Type = string(utils.ProtocolHTTPS)
|
||||
hcp.CreateHealthCheck(hc.ToComputeHealthCheck())
|
||||
|
||||
// Delete only HTTP 1234
|
||||
err := healthChecks.Delete(1234)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error when deleting health check, err: %v", err)
|
||||
}
|
||||
|
||||
// Validate port is deleted
|
||||
_, err = hcp.GetHealthCheck(hc.Name)
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
t.Errorf("expected not-found error, actual: %v", err)
|
||||
}
|
||||
|
||||
// Delete HTTP 1234 again and expect a not-found error
|
||||
err = healthChecks.Delete(1234)
|
||||
if err == nil {
|
||||
t.Errorf("expected not-found error when deleting health check, err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthCheckUpdate(t *testing.T) {
|
||||
namer := &utils.Namer{}
|
||||
hcp := NewFakeHealthCheckProvider()
|
||||
healthChecks := NewHealthChecker(hcp, "/", namer)
|
||||
|
||||
// HTTP
|
||||
// Manually insert a health check
|
||||
hc := DefaultHealthCheck(3000, utils.ProtocolHTTP)
|
||||
hc.Name = namer.BeName(3000)
|
||||
hc.RequestPath = "/my-probes-health"
|
||||
hcp.CreateHealthCheck(hc.ToComputeHealthCheck())
|
||||
|
||||
// Verify the health check exists
|
||||
_, err := healthChecks.Get(3000)
|
||||
if err != nil {
|
||||
t.Fatalf("expected the health check to exist, err: %v", err)
|
||||
}
|
||||
|
||||
// Change to HTTPS
|
||||
hc.Type = string(utils.ProtocolHTTPS)
|
||||
_, err = healthChecks.Sync(hc)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err while syncing healthcheck, err %v", err)
|
||||
}
|
||||
|
||||
// Verify the health check exists
|
||||
_, err = healthChecks.Get(3000)
|
||||
if err != nil {
|
||||
t.Fatalf("expected the health check to exist, err: %v", err)
|
||||
}
|
||||
|
||||
// Verify the check is now HTTPS
|
||||
if hc.Protocol() != utils.ProtocolHTTPS {
|
||||
t.Fatalf("expected check to be of type HTTPS")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthCheckDeleteLegacy(t *testing.T) {
|
||||
namer := &utils.Namer{}
|
||||
hcp := NewFakeHealthCheckProvider()
|
||||
healthChecks := NewHealthChecker(hcp, "/", namer)
|
||||
|
||||
err := hcp.CreateHttpHealthCheck(&compute.HttpHealthCheck{
|
||||
Name: namer.BeName(80),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("expected health check to be created, err: %v", err)
|
||||
}
|
||||
|
||||
err = healthChecks.DeleteLegacy(80)
|
||||
if err != nil {
|
||||
t.Fatalf("expected health check to be deleted, err: %v", err)
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,46 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthchecks
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// HealthCheckProvider is an interface to manage a single GCE health check.
|
||||
type HealthCheckProvider interface {
|
||||
CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error
|
||||
UpdateHttpHealthCheck(hc *compute.HttpHealthCheck) error
|
||||
DeleteHttpHealthCheck(name string) error
|
||||
GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error)
|
||||
|
||||
CreateHealthCheck(hc *compute.HealthCheck) error
|
||||
UpdateHealthCheck(hc *compute.HealthCheck) error
|
||||
DeleteHealthCheck(name string) error
|
||||
GetHealthCheck(name string) (*compute.HealthCheck, error)
|
||||
}
|
||||
|
||||
// HealthChecker is an interface to manage cloud HTTPHealthChecks.
|
||||
type HealthChecker interface {
|
||||
New(port int64, protocol utils.AppProtocol) *HealthCheck
|
||||
Sync(hc *HealthCheck) (string, error)
|
||||
Delete(port int64) error
|
||||
Get(port int64) (*HealthCheck, error)
|
||||
GetLegacy(port int64) (*compute.HttpHealthCheck, error)
|
||||
DeleteLegacy(port int64) error
|
||||
}
|
|
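A small sketch of how the HealthChecker interface above is exercised, mirroring the flow in the removed code and tests (NewHealthChecker, New, Sync). The import paths and the default path argument are illustrative assumptions.

package example

import (
	"k8s.io/ingress/controllers/gce/healthchecks"
	"k8s.io/ingress/controllers/gce/utils"
)

// ensureBackendHealthCheck creates or updates the health check for a backend
// port and returns the self link the backend service should reference.
func ensureBackendHealthCheck(cloud healthchecks.HealthCheckProvider, namer *utils.Namer, port int64) (string, error) {
	checker := healthchecks.NewHealthChecker(cloud, "/healthz", namer)
	// New fills in the defaults and the namer-derived name; Sync creates the
	// check if it is missing or updates it when the protocol changed.
	hc := checker.New(port, utils.ProtocolHTTP)
	return checker.Sync(hc)
}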
@@ -1,105 +0,0 @@
|
|||
# This Service writes the HTTP request headers out to the response. Access it
|
||||
# through its NodePort, LoadBalancer or Ingress endpoint.
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersx
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30301
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersy
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30284
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
# This is a replication controller for the endpoint that serves the two
|
||||
# Services above.
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: echoheaders
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
containers:
|
||||
- name: echoheaders
|
||||
image: gcr.io/google_containers/echoserver:1.8
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
|
||||
---
|
||||
# This is the Ingress resource that creates an HTTP load balancer configured
|
||||
# according to the Ingress rules.
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: echomap
|
||||
spec:
|
||||
backend:
|
||||
# Re-use echoheadersx as the default backend so we stay under the default
|
||||
# quota for gce BackendServices.
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
rules:
|
||||
- host: foo.bar.com
|
||||
http:
|
||||
paths:
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
- host: bar.baz.com
|
||||
http:
|
||||
paths:
|
||||
- path: /bar
|
||||
backend:
|
||||
serviceName: echoheadersy
|
||||
servicePort: 80
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
|
|
@@ -1,204 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// NewFakeInstanceGroups creates a new FakeInstanceGroups.
|
||||
func NewFakeInstanceGroups(nodes sets.String) *FakeInstanceGroups {
|
||||
return &FakeInstanceGroups{
|
||||
instances: nodes,
|
||||
listResult: getInstanceList(nodes),
|
||||
namer: utils.Namer{},
|
||||
zonesToInstances: map[string][]string{},
|
||||
}
|
||||
}
|
||||
|
||||
// InstanceGroup fakes
|
||||
|
||||
// FakeZoneLister records zones for nodes.
|
||||
type FakeZoneLister struct {
|
||||
Zones []string
|
||||
}
|
||||
|
||||
// ListZones returns the list of zones.
|
||||
func (z *FakeZoneLister) ListZones() ([]string, error) {
|
||||
return z.Zones, nil
|
||||
}
|
||||
|
||||
// GetZoneForNode returns the only zone stored in the fake zone lister.
|
||||
func (z *FakeZoneLister) GetZoneForNode(name string) (string, error) {
|
||||
// TODO: evolve as required, it's currently needed just to satisfy the
|
||||
// interface in unittests that don't care about zones. See unittests in
|
||||
// controller/util_test for actual zoneLister testing.
|
||||
return z.Zones[0], nil
|
||||
}
|
||||
|
||||
// FakeInstanceGroups fakes out the instance groups api.
|
||||
type FakeInstanceGroups struct {
|
||||
instances sets.String
|
||||
instanceGroups []*compute.InstanceGroup
|
||||
getResult *compute.InstanceGroup
|
||||
listResult *compute.InstanceGroupsListInstances
|
||||
calls []int
|
||||
namer utils.Namer
|
||||
zonesToInstances map[string][]string
|
||||
}
|
||||
|
||||
// GetInstanceGroup fakes getting an instance group from the cloud.
|
||||
func (f *FakeInstanceGroups) GetInstanceGroup(name, zone string) (*compute.InstanceGroup, error) {
|
||||
f.calls = append(f.calls, utils.Get)
|
||||
for _, ig := range f.instanceGroups {
|
||||
if ig.Name == name && ig.Zone == zone {
|
||||
return ig, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, utils.FakeGoogleAPINotFoundErr()
|
||||
}
|
||||
|
||||
// CreateInstanceGroup fakes instance group creation.
|
||||
func (f *FakeInstanceGroups) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error {
|
||||
ig.SelfLink = ig.Name
|
||||
ig.Zone = zone
|
||||
f.instanceGroups = append(f.instanceGroups, ig)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteInstanceGroup fakes instance group deletion.
|
||||
func (f *FakeInstanceGroups) DeleteInstanceGroup(name, zone string) error {
|
||||
newGroups := []*compute.InstanceGroup{}
|
||||
found := false
|
||||
for _, ig := range f.instanceGroups {
|
||||
if ig.Name == name {
|
||||
found = true
|
||||
continue
|
||||
}
|
||||
newGroups = append(newGroups, ig)
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("instance group %v not found", name)
|
||||
}
|
||||
f.instanceGroups = newGroups
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListInstancesInInstanceGroup fakes listing instances in an instance group.
|
||||
func (f *FakeInstanceGroups) ListInstancesInInstanceGroup(name, zone string, state string) (*compute.InstanceGroupsListInstances, error) {
|
||||
return f.listResult, nil
|
||||
}
|
||||
|
||||
// AddInstancesToInstanceGroup fakes adding instances to an instance group.
|
||||
func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error {
|
||||
instanceNames := toInstanceNames(instanceRefs)
|
||||
f.calls = append(f.calls, utils.AddInstances)
|
||||
f.instances.Insert(instanceNames...)
|
||||
if _, ok := f.zonesToInstances[zone]; !ok {
|
||||
f.zonesToInstances[zone] = []string{}
|
||||
}
|
||||
f.zonesToInstances[zone] = append(f.zonesToInstances[zone], instanceNames...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetInstancesByZone returns the zone to instances map.
|
||||
func (f *FakeInstanceGroups) GetInstancesByZone() map[string][]string {
|
||||
return f.zonesToInstances
|
||||
}
|
||||
|
||||
// RemoveInstancesFromInstanceGroup fakes removing instances from an instance group.
|
||||
func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error {
|
||||
instanceNames := toInstanceNames(instanceRefs)
|
||||
f.calls = append(f.calls, utils.RemoveInstances)
|
||||
f.instances.Delete(instanceNames...)
|
||||
l, ok := f.zonesToInstances[zone]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
newIns := []string{}
|
||||
delIns := sets.NewString(instanceNames...)
|
||||
for _, oldIns := range l {
|
||||
if delIns.Has(oldIns) {
|
||||
continue
|
||||
}
|
||||
newIns = append(newIns, oldIns)
|
||||
}
|
||||
f.zonesToInstances[zone] = newIns
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeInstanceGroups) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error {
|
||||
var ig *compute.InstanceGroup
|
||||
for _, igp := range f.instanceGroups {
|
||||
if igp.Name == igName && igp.Zone == zone {
|
||||
ig = igp
|
||||
break
|
||||
}
|
||||
}
|
||||
if ig == nil {
|
||||
return fmt.Errorf("Failed to find instance group %q in zone %q", igName, zone)
|
||||
}
|
||||
|
||||
ig.NamedPorts = namedPorts
|
||||
return nil
|
||||
}
|
||||
|
||||
// getInstanceList returns an instance list based on the given names.
|
||||
// The names cannot contain a '.'; the real GCE API validates against this.
|
||||
func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances {
|
||||
instanceNames := nodeNames.List()
|
||||
computeInstances := []*compute.InstanceWithNamedPorts{}
|
||||
for _, name := range instanceNames {
|
||||
instanceLink := getInstanceUrl(name)
|
||||
computeInstances = append(
|
||||
computeInstances, &compute.InstanceWithNamedPorts{
|
||||
Instance: instanceLink})
|
||||
}
|
||||
return &compute.InstanceGroupsListInstances{
|
||||
Items: computeInstances,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeInstanceGroups) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) {
|
||||
for _, ins := range instanceNames {
|
||||
instanceLink := getInstanceUrl(ins)
|
||||
refs = append(refs, &compute.InstanceReference{Instance: instanceLink})
|
||||
}
|
||||
return refs
|
||||
}
|
||||
|
||||
func getInstanceUrl(instanceName string) string {
|
||||
return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
|
||||
"project", "zone", instanceName)
|
||||
}
|
||||
|
||||
func toInstanceNames(instanceRefs []*compute.InstanceReference) []string {
|
||||
instanceNames := make([]string, len(instanceRefs))
|
||||
for ix := range instanceRefs {
|
||||
url := instanceRefs[ix].Instance
|
||||
parts := strings.Split(url, "/")
|
||||
instanceNames[ix] = parts[len(parts)-1]
|
||||
}
|
||||
return instanceNames
|
||||
}
|
|
@@ -1,285 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/storage"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
// State string required by gce library to list all instances.
|
||||
allInstances = "ALL"
|
||||
)
|
||||
|
||||
// Instances implements NodePool.
|
||||
type Instances struct {
|
||||
cloud InstanceGroups
|
||||
// zones is a list of zones seeded by Kubernetes node zones.
|
||||
// TODO: we can figure this out.
|
||||
snapshotter storage.Snapshotter
|
||||
zoneLister
|
||||
}
|
||||
|
||||
// NewNodePool creates a new node pool.
|
||||
// - cloud: implements InstanceGroups, used to sync Kubernetes nodes with
|
||||
// members of the cloud InstanceGroup.
|
||||
func NewNodePool(cloud InstanceGroups) NodePool {
|
||||
return &Instances{cloud, storage.NewInMemoryPool(), nil}
|
||||
}
|
||||
|
||||
// Init initializes the instance pool. The given zoneLister is used to list
|
||||
// all zones that require an instance group, and to lookup which zone a
|
||||
// given Kubernetes node is in so we can add it to the right instance group.
|
||||
func (i *Instances) Init(zl zoneLister) {
|
||||
i.zoneLister = zl
|
||||
}
|
||||
|
||||
// AddInstanceGroup creates an instance group if it doesn't exist (or gets it if it does)
|
||||
// and adds the given ports to it. Returns a list of one instance group per zone,
|
||||
// all of which have the exact same named ports.
|
||||
func (i *Instances) AddInstanceGroup(name string, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error) {
|
||||
igs := []*compute.InstanceGroup{}
|
||||
namedPorts := []*compute.NamedPort{}
|
||||
for _, port := range ports {
|
||||
namedPorts = append(namedPorts, utils.GetNamedPort(port))
|
||||
}
|
||||
|
||||
zones, err := i.ListZones()
|
||||
if err != nil {
|
||||
return igs, namedPorts, err
|
||||
}
|
||||
|
||||
defer i.snapshotter.Add(name, struct{}{})
|
||||
for _, zone := range zones {
|
||||
ig, err := i.Get(name, zone)
|
||||
if err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
glog.Errorf("Failed to get instance group %v/%v, err: %v", zone, name, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if ig == nil {
|
||||
glog.Infof("Creating instance group %v in zone %v", name, zone)
|
||||
if err = i.cloud.CreateInstanceGroup(&compute.InstanceGroup{Name: name}, zone); err != nil {
|
||||
// Error may come back with StatusConflict meaning the instance group was created by another controller
|
||||
// possibly the Service Controller for internal load balancers.
|
||||
if utils.IsHTTPErrorCode(err, http.StatusConflict) {
|
||||
glog.Warningf("Failed to create instance group %v/%v due to conflict status, but continuing sync. err: %v", zone, name, err)
|
||||
} else {
|
||||
glog.Errorf("Failed to create instance group %v/%v, err: %v", zone, name, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
ig, err = i.cloud.GetInstanceGroup(name, zone)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get instance group %v/%v after ensuring existence, err: %v", zone, name, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
} else {
|
||||
glog.V(3).Infof("Instance group %v already exists in zone %v", name, zone)
|
||||
}
|
||||
|
||||
existingPorts := map[int64]bool{}
|
||||
for _, np := range ig.NamedPorts {
|
||||
existingPorts[np.Port] = true
|
||||
}
|
||||
var newPorts []*compute.NamedPort
|
||||
for _, np := range namedPorts {
|
||||
if existingPorts[np.Port] {
|
||||
glog.V(3).Infof("Instance group %v already has named port %+v", ig.Name, np)
|
||||
continue
|
||||
}
|
||||
newPorts = append(newPorts, np)
|
||||
}
|
||||
if len(newPorts) > 0 {
|
||||
glog.V(5).Infof("Instance group %v/%v does not have ports %+v, adding them now.", zone, name, namedPorts)
|
||||
if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPorts...)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
igs = append(igs, ig)
|
||||
}
|
||||
return igs, namedPorts, nil
|
||||
}
|
||||
|
||||
// DeleteInstanceGroup deletes the given IG by name, from all zones.
|
||||
func (i *Instances) DeleteInstanceGroup(name string) error {
|
||||
defer i.snapshotter.Delete(name)
|
||||
errs := []error{}
|
||||
|
||||
zones, err := i.ListZones()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, zone := range zones {
|
||||
if err := i.cloud.DeleteInstanceGroup(name, zone); err != nil {
|
||||
if utils.IsNotFoundError(err) {
|
||||
glog.V(3).Infof("Instance group %v in zone %v did not exist", name, zone)
|
||||
} else if utils.IsInUsedByError(err) {
|
||||
glog.V(3).Infof("Could not delete instance group %v in zone %v because it's still in use. Ignoring: %v", name, zone, err)
|
||||
} else {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
} else {
|
||||
glog.V(3).Infof("Deleted instance group %v in zone %v", name, zone)
|
||||
}
|
||||
}
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
|
||||
// list lists the instances in the named instance group across all zones.
|
||||
func (i *Instances) list(name string) (sets.String, error) {
|
||||
nodeNames := sets.NewString()
|
||||
zones, err := i.ListZones()
|
||||
if err != nil {
|
||||
return nodeNames, err
|
||||
}
|
||||
|
||||
for _, zone := range zones {
|
||||
instances, err := i.cloud.ListInstancesInInstanceGroup(
|
||||
name, zone, allInstances)
|
||||
if err != nil {
|
||||
return nodeNames, err
|
||||
}
|
||||
for _, ins := range instances.Items {
|
||||
// TODO: If round trips weren't so slow one would be inclined
|
||||
// to GetInstance using this url and get the name.
|
||||
parts := strings.Split(ins.Instance, "/")
|
||||
nodeNames.Insert(parts[len(parts)-1])
|
||||
}
|
||||
}
|
||||
return nodeNames, nil
|
||||
}
|
||||
|
||||
// Get returns the Instance Group by name.
|
||||
func (i *Instances) Get(name, zone string) (*compute.InstanceGroup, error) {
|
||||
ig, err := i.cloud.GetInstanceGroup(name, zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i.snapshotter.Add(name, struct{}{})
|
||||
return ig, nil
|
||||
}
|
||||
|
||||
// splitNodesByZone takes a list of node names and returns a map of zone:node names.
|
||||
// It figures out the zones by asking the zoneLister.
|
||||
func (i *Instances) splitNodesByZone(names []string) map[string][]string {
|
||||
nodesByZone := map[string][]string{}
|
||||
for _, name := range names {
|
||||
zone, err := i.GetZoneForNode(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get zones for %v: %v, skipping", name, err)
|
||||
continue
|
||||
}
|
||||
if _, ok := nodesByZone[zone]; !ok {
|
||||
nodesByZone[zone] = []string{}
|
||||
}
|
||||
nodesByZone[zone] = append(nodesByZone[zone], name)
|
||||
}
|
||||
return nodesByZone
|
||||
}
|
||||
|
||||
// Add adds the given instances to the appropriately zoned Instance Group.
|
||||
func (i *Instances) Add(groupName string, names []string) error {
|
||||
errs := []error{}
|
||||
for zone, nodeNames := range i.splitNodesByZone(names) {
|
||||
glog.V(1).Infof("Adding nodes %v to %v in zone %v", nodeNames, groupName, zone)
|
||||
if err := i.cloud.AddInstancesToInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
|
||||
// Remove removes the given instances from the appropriately zoned Instance Group.
|
||||
func (i *Instances) Remove(groupName string, names []string) error {
|
||||
errs := []error{}
|
||||
for zone, nodeNames := range i.splitNodesByZone(names) {
|
||||
glog.V(1).Infof("Removing nodes %v from %v in zone %v", nodeNames, groupName, zone)
|
||||
if err := i.cloud.RemoveInstancesFromInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
|
||||
// Sync syncs kubernetes instances with the instances in the instance group.
|
||||
func (i *Instances) Sync(nodes []string) (err error) {
|
||||
glog.V(4).Infof("Syncing nodes %v", nodes)
|
||||
|
||||
defer func() {
|
||||
// The node pool is only responsible for syncing nodes to instance
|
||||
// groups. It never creates/deletes, so if an instance group is
|
||||
// not found there's nothing it can do about it anyway. In most cases
|
||||
// this will happen because the backend pool has deleted the instance
|
||||
// group; however, if it happens because a user deletes the IG by mistake
|
||||
// we should just wait till the backend pool fixes it.
|
||||
if utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
glog.Infof("Node pool encountered a 404, ignoring: %v", err)
|
||||
err = nil
|
||||
}
|
||||
}()
|
||||
|
||||
pool := i.snapshotter.Snapshot()
|
||||
for igName := range pool {
|
||||
gceNodes := sets.NewString()
|
||||
gceNodes, err = i.list(igName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
kubeNodes := sets.NewString(nodes...)
|
||||
|
||||
// A node deleted via kubernetes could still exist as a gce vm. We don't
|
||||
// want to route requests to it. Similarly, a node added to kubernetes
|
||||
// needs to get added to the instance group so we do route requests to it.
|
||||
|
||||
removeNodes := gceNodes.Difference(kubeNodes).List()
|
||||
addNodes := kubeNodes.Difference(gceNodes).List()
|
||||
if len(removeNodes) != 0 {
|
||||
glog.V(4).Infof("Removing nodes from IG: %v", removeNodes)
|
||||
if err = i.Remove(igName, removeNodes); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(addNodes) != 0 {
|
||||
glog.V(4).Infof("Adding nodes to IG: %v", addNodes)
|
||||
if err = i.Add(igName, addNodes); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
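A minimal sketch of the node pool lifecycle implemented above: create the pool over the InstanceGroups API, tell it how to map nodes to zones, ensure the instance group and its named ports, then reconcile membership against the current Kubernetes nodes. Constructor and method names are taken from the removed files; the import path and the zone name are illustrative assumptions.

package example

import (
	"k8s.io/ingress/controllers/gce/instances"
)

// syncNodePool ensures the named instance group exists with the given ports
// and then reconciles its membership against the current Kubernetes nodes.
func syncNodePool(cloud instances.InstanceGroups, igName string, ports []int64, nodeNames []string) error {
	pool := instances.NewNodePool(cloud)
	// The zone lister tells the pool which zones need an instance group and
	// which zone a node lives in; the fake lister pins everything to one zone.
	pool.Init(&instances.FakeZoneLister{Zones: []string{"us-central1-b"}})

	if _, _, err := pool.AddInstanceGroup(igName, ports); err != nil {
		return err
	}
	// Sync diffs group membership against the node list and adds/removes
	// instances so requests are only routed to live Kubernetes nodes.
	return pool.Sync(nodeNames)
}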
@@ -1,127 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
const defaultZone = "default-zone"
|
||||
|
||||
func newNodePool(f *FakeInstanceGroups, zone string) NodePool {
|
||||
pool := NewNodePool(f)
|
||||
pool.Init(&FakeZoneLister{[]string{zone}})
|
||||
return pool
|
||||
}
|
||||
|
||||
func TestNodePoolSync(t *testing.T) {
|
||||
f := NewFakeInstanceGroups(sets.NewString(
|
||||
[]string{"n1", "n2"}...))
|
||||
pool := newNodePool(f, defaultZone)
|
||||
pool.AddInstanceGroup("test", []int64{80})
|
||||
|
||||
// KubeNodes: n1
|
||||
// GCENodes: n1, n2
|
||||
// Remove n2 from the instance group.
|
||||
|
||||
f.calls = []int{}
|
||||
kubeNodes := sets.NewString([]string{"n1"}...)
|
||||
pool.Sync(kubeNodes.List())
|
||||
if f.instances.Len() != kubeNodes.Len() || !kubeNodes.IsSuperset(f.instances) {
|
||||
t.Fatalf("%v != %v", kubeNodes, f.instances)
|
||||
}
|
||||
|
||||
// KubeNodes: n1, n2
|
||||
// GCENodes: n1
|
||||
// Try to add n2 to the instance group.
|
||||
|
||||
f = NewFakeInstanceGroups(sets.NewString([]string{"n1"}...))
|
||||
pool = newNodePool(f, defaultZone)
|
||||
pool.AddInstanceGroup("test", []int64{80})
|
||||
|
||||
f.calls = []int{}
|
||||
kubeNodes = sets.NewString([]string{"n1", "n2"}...)
|
||||
pool.Sync(kubeNodes.List())
|
||||
if f.instances.Len() != kubeNodes.Len() ||
|
||||
!kubeNodes.IsSuperset(f.instances) {
|
||||
t.Fatalf("%v != %v", kubeNodes, f.instances)
|
||||
}
|
||||
|
||||
// KubeNodes: n1, n2
|
||||
// GCENodes: n1, n2
|
||||
// Do nothing.
|
||||
|
||||
f = NewFakeInstanceGroups(sets.NewString([]string{"n1", "n2"}...))
|
||||
pool = newNodePool(f, defaultZone)
|
||||
pool.AddInstanceGroup("test", []int64{80})
|
||||
|
||||
f.calls = []int{}
|
||||
kubeNodes = sets.NewString([]string{"n1", "n2"}...)
|
||||
pool.Sync(kubeNodes.List())
|
||||
if len(f.calls) != 0 {
|
||||
t.Fatalf(
|
||||
"Did not expect any calls, got %+v", f.calls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetNamedPorts(t *testing.T) {
|
||||
f := NewFakeInstanceGroups(sets.NewString(
|
||||
[]string{"ig"}...))
|
||||
pool := newNodePool(f, defaultZone)
|
||||
|
||||
testCases := []struct {
|
||||
newPorts []int64
|
||||
expectedPorts []int64
|
||||
}{
|
||||
{
|
||||
// Verify adding a port works as expected.
|
||||
[]int64{80},
|
||||
[]int64{80},
|
||||
},
|
||||
{
|
||||
// Verify adding multiple ports at once works as expected.
|
||||
[]int64{81, 82},
|
||||
[]int64{80, 81, 82},
|
||||
},
|
||||
{
|
||||
// Adding existing ports should have no impact.
|
||||
[]int64{80, 82},
|
||||
[]int64{80, 81, 82},
|
||||
},
|
||||
// TODO: Add tests to remove named ports when we support that.
|
||||
}
|
||||
for _, test := range testCases {
|
||||
igs, _, err := pool.AddInstanceGroup("ig", test.newPorts)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in adding ports %v to instance group: %s", test.newPorts, err)
|
||||
}
|
||||
if len(igs) != 1 {
|
||||
t.Fatalf("expected a single instance group, got: %v", igs)
|
||||
}
|
||||
actualPorts := igs[0].NamedPorts
|
||||
if len(actualPorts) != len(test.expectedPorts) {
|
||||
t.Fatalf("unexpected named ports on instance group. expected: %v, got: %v", test.expectedPorts, actualPorts)
|
||||
}
|
||||
for i, p := range igs[0].NamedPorts {
|
||||
if p.Port != test.expectedPorts[i] {
|
||||
t.Fatalf("unexpected named ports on instance group. expected: %v, got: %v", test.expectedPorts, actualPorts)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,57 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// zoneLister manages lookups for GCE instance groups/instances to zones.
|
||||
type zoneLister interface {
|
||||
ListZones() ([]string, error)
|
||||
GetZoneForNode(name string) (string, error)
|
||||
}
|
||||
|
||||
// NodePool is an interface to manage a pool of kubernetes nodes synced with vm instances in the cloud
|
||||
// through the InstanceGroups interface. It handles zones opaquely using the zoneLister.
|
||||
type NodePool interface {
|
||||
Init(zl zoneLister)
|
||||
|
||||
// The following 2 methods operate on instance groups.
|
||||
AddInstanceGroup(name string, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error)
|
||||
DeleteInstanceGroup(name string) error
|
||||
|
||||
// TODO: Refactor for modularity
|
||||
Add(groupName string, nodeNames []string) error
|
||||
Remove(groupName string, nodeNames []string) error
|
||||
Sync(nodeNames []string) error
|
||||
Get(name, zone string) (*compute.InstanceGroup, error)
|
||||
}
|
||||
|
||||
// InstanceGroups is an interface for managing gce instances groups, and the instances therein.
|
||||
type InstanceGroups interface {
|
||||
GetInstanceGroup(name, zone string) (*compute.InstanceGroup, error)
|
||||
CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error
|
||||
DeleteInstanceGroup(name, zone string) error
|
||||
|
||||
// TODO: Refactor for modularity.
|
||||
ListInstancesInInstanceGroup(name, zone string, state string) (*compute.InstanceGroupsListInstances, error)
|
||||
AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
|
||||
RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
|
||||
ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference)
|
||||
SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error
|
||||
}
|
|
@@ -1,13 +0,0 @@
|
|||
package instances
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// EnsureInstanceGroupsAndPorts is a helper to create instance groups and set their named ports.
|
||||
// This method exists to ensure that we are using the same logic at all places.
|
||||
func EnsureInstanceGroupsAndPorts(nodePool NodePool, namer *utils.Namer, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error) {
|
||||
return nodePool.AddInstanceGroup(namer.IGName(), ports)
|
||||
}
|
|
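A brief sketch of calling the helper above so that every component derives the instance group name from the same namer. The signature matches the removed utils.go; the import paths are illustrative assumptions.

package example

import (
	compute "google.golang.org/api/compute/v1"

	"k8s.io/ingress/controllers/gce/instances"
	"k8s.io/ingress/controllers/gce/utils"
)

// ensureIGs delegates to the shared helper so the backend pool and the
// loadbalancer pool always agree on the namer-derived instance group name.
func ensureIGs(pool instances.NodePool, namer *utils.Namer, ports []int64) ([]*compute.InstanceGroup, []*compute.NamedPort, error) {
	return instances.EnsureInstanceGroupsAndPorts(pool, namer, ports)
}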
@@ -1,453 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
var testIPManager = testIP{}
|
||||
|
||||
type testIP struct {
|
||||
start int
|
||||
}
|
||||
|
||||
func (t *testIP) ip() string {
|
||||
t.start++
|
||||
return fmt.Sprintf("0.0.0.%v", t.start)
|
||||
}
|
||||
|
||||
// Loadbalancer fakes
|
||||
|
||||
// FakeLoadBalancers is a type that fakes out the loadbalancer interface.
|
||||
type FakeLoadBalancers struct {
|
||||
Fw []*compute.ForwardingRule
|
||||
Um []*compute.UrlMap
|
||||
Tp []*compute.TargetHttpProxy
|
||||
Tps []*compute.TargetHttpsProxy
|
||||
IP []*compute.Address
|
||||
Certs []*compute.SslCertificate
|
||||
name string
|
||||
calls []string // list of calls that were made
|
||||
}
|
||||
|
||||
// TODO: There is some duplication between these functions and the name mungers in
|
||||
// the loadbalancer file.
|
||||
func (f *FakeLoadBalancers) fwName(https bool) string {
|
||||
if https {
|
||||
return fmt.Sprintf("%v-%v", httpsForwardingRulePrefix, f.name)
|
||||
}
|
||||
return fmt.Sprintf("%v-%v", forwardingRulePrefix, f.name)
|
||||
}
|
||||
|
||||
func (f *FakeLoadBalancers) umName() string {
|
||||
return fmt.Sprintf("%v-%v", urlMapPrefix, f.name)
|
||||
}
|
||||
|
||||
func (f *FakeLoadBalancers) tpName(https bool) string {
|
||||
if https {
|
||||
return fmt.Sprintf("%v-%v", targetHTTPSProxyPrefix, f.name)
|
||||
}
|
||||
return fmt.Sprintf("%v-%v", targetProxyPrefix, f.name)
|
||||
}
|
||||
|
||||
// String is the string method for FakeLoadBalancers.
|
||||
func (f *FakeLoadBalancers) String() string {
|
||||
msg := fmt.Sprintf(
|
||||
"Loadbalancer %v,\nforwarding rules:\n", f.name)
|
||||
for _, fw := range f.Fw {
|
||||
msg += fmt.Sprintf("\t%v\n", fw.Name)
|
||||
}
|
||||
msg += fmt.Sprintf("Target proxies\n")
|
||||
for _, tp := range f.Tp {
|
||||
msg += fmt.Sprintf("\t%v\n", tp.Name)
|
||||
}
|
||||
msg += fmt.Sprintf("UrlMaps\n")
|
||||
for _, um := range f.Um {
|
||||
msg += fmt.Sprintf("%v\n", um.Name)
|
||||
msg += fmt.Sprintf("\tHost Rules:\n")
|
||||
for _, hostRule := range um.HostRules {
|
||||
msg += fmt.Sprintf("\t\t%v\n", hostRule)
|
||||
}
|
||||
msg += fmt.Sprintf("\tPath Matcher:\n")
|
||||
for _, pathMatcher := range um.PathMatchers {
|
||||
msg += fmt.Sprintf("\t\t%v\n", pathMatcher.Name)
|
||||
for _, pathRule := range pathMatcher.PathRules {
|
||||
msg += fmt.Sprintf("\t\t\t%+v\n", pathRule)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// Forwarding Rule fakes
|
||||
|
||||
// GetGlobalForwardingRule returns a fake forwarding rule.
|
||||
func (f *FakeLoadBalancers) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) {
|
||||
f.calls = append(f.calls, "GetGlobalForwardingRule")
|
||||
for i := range f.Fw {
|
||||
if f.Fw[i].Name == name {
|
||||
return f.Fw[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("forwarding rule %v not found", name)
|
||||
}
|
||||
|
||||
// CreateGlobalForwardingRule fakes forwarding rule creation.
|
||||
func (f *FakeLoadBalancers) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error {
|
||||
f.calls = append(f.calls, "CreateGlobalForwardingRule")
|
||||
if rule.IPAddress == "" {
|
||||
rule.IPAddress = testIPManager.ip()
|
||||
}
|
||||
rule.SelfLink = rule.Name
|
||||
f.Fw = append(f.Fw, rule)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetProxyForGlobalForwardingRule fakes setting a global forwarding rule.
|
||||
func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(forwardingRuleName, proxyLink string) error {
|
||||
f.calls = append(f.calls, "SetProxyForGlobalForwardingRule")
|
||||
for i := range f.Fw {
|
||||
if f.Fw[i].Name == forwardingRuleName {
|
||||
f.Fw[i].Target = proxyLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteGlobalForwardingRule fakes deleting a global forwarding rule.
|
||||
func (f *FakeLoadBalancers) DeleteGlobalForwardingRule(name string) error {
|
||||
f.calls = append(f.calls, "DeleteGlobalForwardingRule")
|
||||
fw := []*compute.ForwardingRule{}
|
||||
for i := range f.Fw {
|
||||
if f.Fw[i].Name != name {
|
||||
fw = append(fw, f.Fw[i])
|
||||
}
|
||||
}
|
||||
f.Fw = fw
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetForwardingRulesWithIPs returns all forwarding rules that match the given ips.
|
||||
func (f *FakeLoadBalancers) GetForwardingRulesWithIPs(ip []string) (fwRules []*compute.ForwardingRule) {
|
||||
f.calls = append(f.calls, "GetForwardingRulesWithIPs")
|
||||
ipSet := sets.NewString(ip...)
|
||||
for i := range f.Fw {
|
||||
if ipSet.Has(f.Fw[i].IPAddress) {
|
||||
fwRules = append(fwRules, f.Fw[i])
|
||||
}
|
||||
}
|
||||
return fwRules
|
||||
}
|
||||
|
||||
// UrlMaps fakes
|
||||
|
||||
// GetUrlMap fakes getting url maps from the cloud.
|
||||
func (f *FakeLoadBalancers) GetUrlMap(name string) (*compute.UrlMap, error) {
|
||||
f.calls = append(f.calls, "GetUrlMap")
|
||||
for i := range f.Um {
|
||||
if f.Um[i].Name == name {
|
||||
return f.Um[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("url map %v not found", name)
|
||||
}
|
||||
|
||||
// CreateUrlMap fakes url-map creation.
|
||||
func (f *FakeLoadBalancers) CreateUrlMap(urlMap *compute.UrlMap) error {
|
||||
f.calls = append(f.calls, "CreateUrlMap")
|
||||
urlMap.SelfLink = f.umName()
|
||||
f.Um = append(f.Um, urlMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateUrlMap fakes updating url-maps.
|
||||
func (f *FakeLoadBalancers) UpdateUrlMap(urlMap *compute.UrlMap) error {
|
||||
f.calls = append(f.calls, "UpdateUrlMap")
|
||||
for i := range f.Um {
|
||||
if f.Um[i].Name == urlMap.Name {
|
||||
f.Um[i] = urlMap
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("url map %v not found", urlMap.Name)
|
||||
}
|
||||
|
||||
// DeleteUrlMap fakes url-map deletion.
|
||||
func (f *FakeLoadBalancers) DeleteUrlMap(name string) error {
|
||||
f.calls = append(f.calls, "DeleteUrlMap")
|
||||
um := []*compute.UrlMap{}
|
||||
for i := range f.Um {
|
||||
if f.Um[i].Name != name {
|
||||
um = append(um, f.Um[i])
|
||||
}
|
||||
}
|
||||
f.Um = um
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetProxies fakes
|
||||
|
||||
// GetTargetHttpProxy fakes getting target http proxies from the cloud.
|
||||
func (f *FakeLoadBalancers) GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error) {
|
||||
f.calls = append(f.calls, "GetTargetHttpProxy")
|
||||
for i := range f.Tp {
|
||||
if f.Tp[i].Name == name {
|
||||
return f.Tp[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("target http proxy %v not found", name)
|
||||
}
|
||||
|
||||
// CreateTargetHttpProxy fakes creating a target http proxy.
|
||||
func (f *FakeLoadBalancers) CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error {
|
||||
f.calls = append(f.calls, "CreateTargetHttpProxy")
|
||||
proxy.SelfLink = proxy.Name
|
||||
f.Tp = append(f.Tp, proxy)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTargetHttpProxy fakes deleting a target http proxy.
|
||||
func (f *FakeLoadBalancers) DeleteTargetHttpProxy(name string) error {
|
||||
f.calls = append(f.calls, "DeleteTargetHttpProxy")
|
||||
tp := []*compute.TargetHttpProxy{}
|
||||
for i := range f.Tp {
|
||||
if f.Tp[i].Name != name {
|
||||
tp = append(tp, f.Tp[i])
|
||||
}
|
||||
}
|
||||
f.Tp = tp
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetUrlMapForTargetHttpProxy fakes setting a url-map for a target http proxy.
|
||||
func (f *FakeLoadBalancers) SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error {
|
||||
f.calls = append(f.calls, "SetUrlMapForTargetHttpProxy")
|
||||
for i := range f.Tp {
|
||||
if f.Tp[i].Name == proxy.Name {
|
||||
f.Tp[i].UrlMap = urlMap.SelfLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetHttpsProxy fakes
|
||||
|
||||
// GetTargetHttpsProxy fakes getting target https proxies from the cloud.
|
||||
func (f *FakeLoadBalancers) GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error) {
|
||||
f.calls = append(f.calls, "GetTargetHttpsProxy")
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name == name {
|
||||
return f.Tps[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("target https proxy %v not found", name)
|
||||
}
|
||||
|
||||
// CreateTargetHttpsProxy fakes creating a target https proxy.
|
||||
func (f *FakeLoadBalancers) CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) error {
|
||||
f.calls = append(f.calls, "CreateTargetHttpsProxy")
|
||||
proxy.SelfLink = proxy.Name
|
||||
f.Tps = append(f.Tps, proxy)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTargetHttpsProxy fakes deleting a target https proxy.
|
||||
func (f *FakeLoadBalancers) DeleteTargetHttpsProxy(name string) error {
|
||||
f.calls = append(f.calls, "DeleteTargetHttpsProxy")
|
||||
tp := []*compute.TargetHttpsProxy{}
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name != name {
|
||||
tp = append(tp, f.Tps[i])
|
||||
}
|
||||
}
|
||||
f.Tps = tp
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetUrlMapForTargetHttpsProxy fakes setting a url-map for a target https proxy.
|
||||
func (f *FakeLoadBalancers) SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error {
|
||||
f.calls = append(f.calls, "SetUrlMapForTargetHttpsProxy")
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name == proxy.Name {
|
||||
f.Tps[i].UrlMap = urlMap.SelfLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetSslCertificateForTargetHttpsProxy fakes out setting certificates.
|
||||
func (f *FakeLoadBalancers) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, SSLCert *compute.SslCertificate) error {
|
||||
f.calls = append(f.calls, "SetSslCertificateForTargetHttpsProxy")
|
||||
found := false
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name == proxy.Name {
|
||||
f.Tps[i].SslCertificates = []string{SSLCert.SelfLink}
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("failed to find proxy %v", proxy.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UrlMap fakes
|
||||
|
||||
// CheckURLMap checks the URL map.
|
||||
func (f *FakeLoadBalancers) CheckURLMap(t *testing.T, l7 *L7, expectedMap map[string]utils.FakeIngressRuleValueMap) {
|
||||
f.calls = append(f.calls, "CheckURLMap")
|
||||
um, err := f.GetUrlMap(l7.um.Name)
|
||||
if err != nil || um == nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
// Check the default backend
|
||||
var d string
|
||||
if h, ok := expectedMap[utils.DefaultBackendKey]; ok {
|
||||
if d, ok = h[utils.DefaultBackendKey]; ok {
|
||||
delete(h, utils.DefaultBackendKey)
|
||||
}
|
||||
delete(expectedMap, utils.DefaultBackendKey)
|
||||
}
|
||||
// The urlmap should have a default backend, and each path matcher.
|
||||
if d != "" && l7.um.DefaultService != d {
|
||||
t.Fatalf("Expected default backend %v found %v",
|
||||
d, l7.um.DefaultService)
|
||||
}
|
||||
|
||||
for _, matcher := range l7.um.PathMatchers {
|
||||
var hostname string
|
||||
// There's a 1:1 mapping between pathmatchers and hosts
|
||||
for _, hostRule := range l7.um.HostRules {
|
||||
if matcher.Name == hostRule.PathMatcher {
|
||||
if len(hostRule.Hosts) != 1 {
|
||||
t.Fatalf("Unexpected hosts in hostrules %+v", hostRule)
|
||||
}
|
||||
if d != "" && matcher.DefaultService != d {
|
||||
t.Fatalf("Expected default backend %v found %v",
|
||||
d, matcher.DefaultService)
|
||||
}
|
||||
hostname = hostRule.Hosts[0]
|
||||
break
|
||||
}
|
||||
}
|
||||
// These are all pathrules for a single host, found above
|
||||
for _, rule := range matcher.PathRules {
|
||||
if len(rule.Paths) != 1 {
|
||||
t.Fatalf("Unexpected rule in pathrules %+v", rule)
|
||||
}
|
||||
pathRule := rule.Paths[0]
|
||||
if hostMap, ok := expectedMap[hostname]; !ok {
|
||||
t.Fatalf("Expected map for host %v: %v", hostname, hostMap)
|
||||
} else if svc, ok := expectedMap[hostname][pathRule]; !ok {
|
||||
t.Fatalf("Expected rule %v in host map", pathRule)
|
||||
} else if svc != rule.Service {
|
||||
t.Fatalf("Expected service %v found %v", svc, rule.Service)
|
||||
}
|
||||
delete(expectedMap[hostname], pathRule)
|
||||
if len(expectedMap[hostname]) == 0 {
|
||||
delete(expectedMap, hostname)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(expectedMap) != 0 {
|
||||
t.Fatalf("Untranslated entries %+v", expectedMap)
|
||||
}
|
||||
}
|
||||
|
||||
// Static IP fakes
|
||||
|
||||
// ReserveGlobalAddress fakes out static IP reservation.
|
||||
func (f *FakeLoadBalancers) ReserveGlobalAddress(addr *compute.Address) error {
|
||||
f.calls = append(f.calls, "ReserveGlobalAddress")
|
||||
f.IP = append(f.IP, addr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetGlobalAddress fakes out static IP retrieval.
|
||||
func (f *FakeLoadBalancers) GetGlobalAddress(name string) (*compute.Address, error) {
|
||||
f.calls = append(f.calls, "GetGlobalAddress")
|
||||
for i := range f.IP {
|
||||
if f.IP[i].Name == name {
|
||||
return f.IP[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("static IP %v not found", name)
|
||||
}
|
||||
|
||||
// DeleteGlobalAddress fakes out static IP deletion.
|
||||
func (f *FakeLoadBalancers) DeleteGlobalAddress(name string) error {
|
||||
f.calls = append(f.calls, "DeleteGlobalAddress")
|
||||
ip := []*compute.Address{}
|
||||
for i := range f.IP {
|
||||
if f.IP[i].Name != name {
|
||||
ip = append(ip, f.IP[i])
|
||||
}
|
||||
}
|
||||
f.IP = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
// SslCertificate fakes
|
||||
|
||||
// GetSslCertificate fakes out getting ssl certs.
|
||||
func (f *FakeLoadBalancers) GetSslCertificate(name string) (*compute.SslCertificate, error) {
|
||||
f.calls = append(f.calls, "GetSslCertificate")
|
||||
for i := range f.Certs {
|
||||
if f.Certs[i].Name == name {
|
||||
return f.Certs[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("cert %v not found", name)
|
||||
}
|
||||
|
||||
// CreateSslCertificate fakes out certificate creation.
|
||||
func (f *FakeLoadBalancers) CreateSslCertificate(cert *compute.SslCertificate) (*compute.SslCertificate, error) {
|
||||
f.calls = append(f.calls, "CreateSslCertificate")
|
||||
cert.SelfLink = cert.Name
|
||||
f.Certs = append(f.Certs, cert)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// DeleteSslCertificate fakes out certificate deletion.
|
||||
func (f *FakeLoadBalancers) DeleteSslCertificate(name string) error {
|
||||
f.calls = append(f.calls, "DeleteSslCertificate")
|
||||
certs := []*compute.SslCertificate{}
|
||||
for i := range f.Certs {
|
||||
if f.Certs[i].Name != name {
|
||||
certs = append(certs, f.Certs[i])
|
||||
}
|
||||
}
|
||||
f.Certs = certs
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFakeLoadBalancers creates a fake cloud client. Name is the name
|
||||
// inserted into the selfLink of the associated resources for testing.
|
||||
// eg: forwardingRule.SelfLink == k8-fw-name.
|
||||
func NewFakeLoadBalancers(name string) *FakeLoadBalancers {
|
||||
return &FakeLoadBalancers{
|
||||
Fw: []*compute.ForwardingRule{},
|
||||
name: name,
|
||||
}
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// LoadBalancers is an interface for managing all the gce resources needed by L7
|
||||
// loadbalancers. We don't have individual pools for each of these resources
|
||||
// because none of them are usable (or acquirable) stand-alone, unlike backends
|
||||
// and instance groups. The dependency graph:
|
||||
// ForwardingRule -> UrlMaps -> TargetProxies
|
||||
type LoadBalancers interface {
|
||||
// Forwarding Rules
|
||||
GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error)
|
||||
CreateGlobalForwardingRule(rule *compute.ForwardingRule) error
|
||||
DeleteGlobalForwardingRule(name string) error
|
||||
SetProxyForGlobalForwardingRule(fw, proxy string) error
|
||||
|
||||
// UrlMaps
|
||||
GetUrlMap(name string) (*compute.UrlMap, error)
|
||||
CreateUrlMap(urlMap *compute.UrlMap) error
|
||||
UpdateUrlMap(urlMap *compute.UrlMap) error
|
||||
DeleteUrlMap(name string) error
|
||||
|
||||
// TargetProxies
|
||||
GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error)
|
||||
CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error
|
||||
DeleteTargetHttpProxy(name string) error
|
||||
SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error
|
||||
|
||||
// TargetHttpsProxies
|
||||
GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error)
|
||||
CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) error
|
||||
DeleteTargetHttpsProxy(name string) error
|
||||
SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error
|
||||
SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, SSLCerts *compute.SslCertificate) error
|
||||
|
||||
// SslCertificates
|
||||
GetSslCertificate(name string) (*compute.SslCertificate, error)
|
||||
CreateSslCertificate(certs *compute.SslCertificate) (*compute.SslCertificate, error)
|
||||
DeleteSslCertificate(name string) error
|
||||
|
||||
// Static IP
|
||||
|
||||
ReserveGlobalAddress(addr *compute.Address) error
|
||||
GetGlobalAddress(name string) (*compute.Address, error)
|
||||
DeleteGlobalAddress(name string) error
|
||||
}
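As a rough illustration of the dependency graph described above (this helper is not part of the original file), the chain can be assembled bottom-up against any LoadBalancers implementation; the resource names below are invented for the example, and the FakeLoadBalancers from the fakes file above is enough to exercise it:

package loadbalancers

import compute "google.golang.org/api/compute/v1"

// exampleWireChain is a hypothetical sketch: it creates a url map, points a
// target HTTP proxy at it, and points a global forwarding rule at the proxy,
// the same ordering the L7 pool follows when it syncs a loadbalancer.
func exampleWireChain(lb LoadBalancers, defaultBackendLink string) error {
	um := &compute.UrlMap{Name: "k8s-um-example", DefaultService: defaultBackendLink}
	if err := lb.CreateUrlMap(um); err != nil {
		return err
	}
	tp := &compute.TargetHttpProxy{Name: "k8s-tp-example"}
	if err := lb.CreateTargetHttpProxy(tp); err != nil {
		return err
	}
	if err := lb.SetUrlMapForTargetHttpProxy(tp, um); err != nil {
		return err
	}
	fw := &compute.ForwardingRule{Name: "k8s-fw-example"}
	if err := lb.CreateGlobalForwardingRule(fw); err != nil {
		return err
	}
	// With FakeLoadBalancers, Create* fills SelfLink from the name, so the
	// proxy link is available immediately; a real client would re-read it.
	return lb.SetProxyForGlobalForwardingRule(fw.Name, tp.SelfLink)
}

For instance, exampleWireChain(NewFakeLoadBalancers("test"), "defaultBackend") leaves the fake holding one forwarding rule whose Target is the proxy's SelfLink.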
|
||||
|
||||
// LoadBalancerPool is an interface to manage the cloud resources associated
|
||||
// with a gce loadbalancer.
|
||||
type LoadBalancerPool interface {
|
||||
Get(name string) (*L7, error)
|
||||
Add(ri *L7RuntimeInfo) error
|
||||
Delete(name string) error
|
||||
Sync(ri []*L7RuntimeInfo) error
|
||||
GC(names []string) error
|
||||
Shutdown() error
|
||||
}
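A minimal caller-side sketch (an assumption, not lifted from the controller) of how this pool is typically driven, sitting alongside the interfaces in package loadbalancers: Sync the desired runtime infos, then GC with the set of names that should survive.

// reconcileExample is a hypothetical helper built only from the
// LoadBalancerPool interface above; it assumes GC keeps every name it is
// given and removes the rest.
func reconcileExample(pool LoadBalancerPool, desired []*L7RuntimeInfo) error {
	if err := pool.Sync(desired); err != nil {
		return err
	}
	names := make([]string, 0, len(desired))
	for _, ri := range desired {
		names = append(names, ri.Name)
	}
	return pool.GC(names)
}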
|
File diff suppressed because it is too large
|
@ -1,478 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/backends"
|
||||
"k8s.io/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/ingress/controllers/gce/instances"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultZone = "zone-a"
|
||||
)
|
||||
|
||||
var (
|
||||
testDefaultBeNodePort = backends.ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP}
|
||||
)
|
||||
|
||||
func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T) LoadBalancerPool {
|
||||
fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
fakeHCP := healthchecks.NewFakeHealthCheckProvider()
|
||||
namer := &utils.Namer{}
|
||||
healthChecker := healthchecks.NewHealthChecker(fakeHCP, "/", namer)
|
||||
nodePool := instances.NewNodePool(fakeIGs)
|
||||
nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}})
|
||||
backendPool := backends.NewBackendPool(
|
||||
fakeBackends, healthChecker, nodePool, namer, []int64{}, false)
|
||||
return NewLoadBalancerPool(f, backendPool, testDefaultBeNodePort, namer)
|
||||
}
|
||||
|
||||
func TestCreateHTTPLoadBalancer(t *testing.T) {
|
||||
// This should NOT create the forwarding rule and target proxy
|
||||
// associated with the HTTPS branch of this loadbalancer.
|
||||
lbInfo := &L7RuntimeInfo{Name: "test", AllowHTTP: true}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tp, err := f.GetTargetHttpProxy(f.tpName(false))
|
||||
if err != nil || tp.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fw, err := f.GetGlobalForwardingRule(f.fwName(false))
|
||||
if err != nil || fw.Target != tp.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateHTTPSLoadBalancer(t *testing.T) {
|
||||
// This should NOT create the forwarding rule and target proxy
|
||||
// associated with the HTTP branch of this loadbalancer.
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: false,
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tps, err := f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil || tps.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fws, err := f.GetGlobalForwardingRule(f.fwName(true))
|
||||
if err != nil || fws.Target != tps.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that a certificate is created from the provided Key/Cert combo
|
||||
// and the proxy is updated to another cert when the provided cert changes
|
||||
func TestCertUpdate(t *testing.T) {
|
||||
primaryCertName := "k8s-ssl-test"
|
||||
secondaryCertName := "k8s-ssl-1-test"
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: false,
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
|
||||
// Sync first cert
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
verifyCertAndProxyLink(primaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
|
||||
// Sync with different cert
|
||||
lbInfo.TLS = &TLSCerts{Key: "key2", Cert: "cert2"}
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
verifyCertAndProxyLink(secondaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
}
|
||||
|
||||
// Tests that the controller can overwrite existing, unused certificates
|
||||
func TestCertCreationWithCollision(t *testing.T) {
|
||||
primaryCertName := "k8s-ssl-test"
|
||||
secondaryCertName := "k8s-ssl-1-test"
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: false,
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
|
||||
// Have both names already used by orphaned certs
|
||||
f.CreateSslCertificate(&compute.SslCertificate{
|
||||
Name: primaryCertName,
|
||||
Certificate: "abc",
|
||||
SelfLink: "existing",
|
||||
})
|
||||
f.CreateSslCertificate(&compute.SslCertificate{
|
||||
Name: secondaryCertName,
|
||||
Certificate: "xyz",
|
||||
SelfLink: "existing",
|
||||
})
|
||||
|
||||
// Sync first cert
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
verifyCertAndProxyLink(primaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
|
||||
// Sync with different cert
|
||||
lbInfo.TLS = &TLSCerts{Key: "key2", Cert: "cert2"}
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
verifyCertAndProxyLink(secondaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
}
|
||||
|
||||
func TestCertRetentionAfterRestart(t *testing.T) {
|
||||
primaryCertName := "k8s-ssl-test"
|
||||
secondaryCertName := "k8s-ssl-1-test"
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: false,
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
firstPool := newFakeLoadBalancerPool(f, t)
|
||||
|
||||
// Sync twice so the expected certificate uses the secondary name
|
||||
firstPool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
verifyCertAndProxyLink(primaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
lbInfo.TLS = &TLSCerts{Key: "key2", Cert: "cert2"}
|
||||
firstPool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
verifyCertAndProxyLink(secondaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
|
||||
// Restart of controller represented by a new pool
|
||||
secondPool := newFakeLoadBalancerPool(f, t)
|
||||
secondPool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
|
||||
// Verify second name is still used
|
||||
verifyCertAndProxyLink(secondaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
|
||||
// Update cert one more time to verify loop
|
||||
lbInfo.TLS = &TLSCerts{Key: "key3", Cert: "cert3"}
|
||||
secondPool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
verifyCertAndProxyLink(primaryCertName, lbInfo.TLS.Cert, f, t)
|
||||
|
||||
}
|
||||
|
||||
func verifyCertAndProxyLink(certName, certValue string, f *FakeLoadBalancers, t *testing.T) {
|
||||
cert, err := f.GetSslCertificate(certName)
|
||||
if err != nil {
|
||||
t.Fatalf("expected ssl certificate to exist: %v, err: %v", certName, err)
|
||||
}
|
||||
|
||||
if cert.Certificate != certValue {
|
||||
t.Fatalf("unexpected certificate value; expected %v, actual %v", certValue, cert.Certificate)
|
||||
}
|
||||
|
||||
tps, err := f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil {
|
||||
t.Fatalf("expected https proxy to exist: %v, err: %v", certName, err)
|
||||
}
|
||||
|
||||
if len(tps.SslCertificates) == 0 || tps.SslCertificates[0] != cert.SelfLink {
|
||||
t.Fatalf("expected ssl certificate to be linked in target proxy; Cert Link: %q; Target Proxy Certs: %v", cert.SelfLink, tps.SslCertificates)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateHTTPSLoadBalancerAnnotationCert(t *testing.T) {
|
||||
// This should NOT create the forwarding rule and target proxy
|
||||
// associated with the HTTP branch of this loadbalancer.
|
||||
tlsName := "external-cert-name"
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: false,
|
||||
TLSName: tlsName,
|
||||
}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
f.CreateSslCertificate(&compute.SslCertificate{
|
||||
Name: tlsName,
|
||||
})
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tps, err := f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil || tps.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fws, err := f.GetGlobalForwardingRule(f.fwName(true))
|
||||
if err != nil || fws.Target != tps.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateBothLoadBalancers(t *testing.T) {
|
||||
// This should create 2 forwarding rules and target proxies
|
||||
// but they should use the same urlmap, and have the same
|
||||
// static ip.
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: true,
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tps, err := f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil || tps.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tp, err := f.GetTargetHttpProxy(f.tpName(false))
|
||||
if err != nil || tp.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fws, err := f.GetGlobalForwardingRule(f.fwName(true))
|
||||
if err != nil || fws.Target != tps.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fw, err := f.GetGlobalForwardingRule(f.fwName(false))
|
||||
if err != nil || fw.Target != tp.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
ip, err := f.GetGlobalAddress(f.fwName(false))
|
||||
if err != nil || ip.Address != fw.IPAddress || ip.Address != fws.IPAddress {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateUrlMap(t *testing.T) {
|
||||
um1 := utils.GCEURLMap{
|
||||
"bar.example.com": {
|
||||
"/bar2": &compute.BackendService{SelfLink: "bar2svc"},
|
||||
},
|
||||
}
|
||||
um2 := utils.GCEURLMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": &compute.BackendService{SelfLink: "foo1svc"},
|
||||
"/foo2": &compute.BackendService{SelfLink: "foo2svc"},
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": &compute.BackendService{SelfLink: "bar1svc"},
|
||||
},
|
||||
}
|
||||
um2.PutDefaultBackend(&compute.BackendService{SelfLink: "default"})
|
||||
|
||||
lbInfo := &L7RuntimeInfo{Name: "test", AllowHTTP: true}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
for _, ir := range []utils.GCEURLMap{um1, um2} {
|
||||
if err := l7.UpdateUrlMap(ir); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
// The final map doesn't contain /bar2
|
||||
expectedMap := map[string]utils.FakeIngressRuleValueMap{
|
||||
utils.DefaultBackendKey: {
|
||||
utils.DefaultBackendKey: "default",
|
||||
},
|
||||
"foo.example.com": {
|
||||
"/foo1": "foo1svc",
|
||||
"/foo2": "foo2svc",
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": "bar1svc",
|
||||
},
|
||||
}
|
||||
f.CheckURLMap(t, l7, expectedMap)
|
||||
}
|
||||
|
||||
func TestUpdateUrlMapNoChanges(t *testing.T) {
|
||||
um1 := utils.GCEURLMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": &compute.BackendService{SelfLink: "foo1svc"},
|
||||
"/foo2": &compute.BackendService{SelfLink: "foo2svc"},
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": &compute.BackendService{SelfLink: "bar1svc"},
|
||||
},
|
||||
}
|
||||
um1.PutDefaultBackend(&compute.BackendService{SelfLink: "default"})
|
||||
um2 := utils.GCEURLMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": &compute.BackendService{SelfLink: "foo1svc"},
|
||||
"/foo2": &compute.BackendService{SelfLink: "foo2svc"},
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": &compute.BackendService{SelfLink: "bar1svc"},
|
||||
},
|
||||
}
|
||||
um2.PutDefaultBackend(&compute.BackendService{SelfLink: "default"})
|
||||
|
||||
lbInfo := &L7RuntimeInfo{Name: "test", AllowHTTP: true}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
for _, ir := range []utils.GCEURLMap{um1, um2} {
|
||||
if err := l7.UpdateUrlMap(ir); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
for _, call := range f.calls {
|
||||
if call == "UpdateUrlMap" {
|
||||
t.Errorf("UpdateUrlMap() should not have been called")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameParsing(t *testing.T) {
|
||||
clusterName := "123"
|
||||
firewallName := clusterName
|
||||
namer := utils.NewNamer(clusterName, firewallName)
|
||||
fullName := namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, namer.LBName("testlb")))
|
||||
annotationsMap := map[string]string{
|
||||
fmt.Sprintf("%v/forwarding-rule", utils.K8sAnnotationPrefix): fullName,
|
||||
}
|
||||
components := namer.ParseName(GCEResourceName(annotationsMap, "forwarding-rule"))
|
||||
t.Logf("%+v", components)
|
||||
if components.ClusterName != clusterName {
|
||||
t.Errorf("Failed to parse cluster name from %v, expected %v got %v", fullName, clusterName, components.ClusterName)
|
||||
}
|
||||
resourceName := "fw"
|
||||
if components.Resource != resourceName {
|
||||
t.Errorf("Failed to parse resource from %v, expected %v got %v", fullName, resourceName, components.Resource)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterNameChange(t *testing.T) {
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tps, err := f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil || tps.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fws, err := f.GetGlobalForwardingRule(f.fwName(true))
|
||||
if err != nil || fws.Target != tps.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
newName := "newName"
|
||||
namer := pool.(*L7s).namer
|
||||
namer.SetClusterName(newName)
|
||||
f.name = fmt.Sprintf("%v--%v", lbInfo.Name, newName)
|
||||
|
||||
// Now the components should get renamed with the next suffix.
|
||||
pool.Sync([]*L7RuntimeInfo{lbInfo})
|
||||
l7, err = pool.Get(lbInfo.Name)
|
||||
if err != nil || namer.ParseName(l7.Name).ClusterName != newName {
|
||||
t.Fatalf("Expected L7 name to change.")
|
||||
}
|
||||
um, err = f.GetUrlMap(f.umName())
|
||||
if err != nil || namer.ParseName(um.Name).ClusterName != newName {
|
||||
t.Fatalf("Expected urlmap name to change.")
|
||||
}
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
tps, err = f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil || tps.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fws, err = f.GetGlobalForwardingRule(f.fwName(true))
|
||||
if err != nil || fws.Target != tps.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidClusterNameChange(t *testing.T) {
|
||||
namer := utils.NewNamer("test--123", "test--123")
|
||||
if got := namer.GetClusterName(); got != "123" {
|
||||
t.Fatalf("Expected name 123, got %v", got)
|
||||
}
|
||||
// A name with `--` should take the last token
|
||||
for _, testCase := range []struct{ newName, expected string }{
|
||||
{"foo--bar", "bar"},
|
||||
{"--", ""},
|
||||
{"", ""},
|
||||
{"foo--bar--com", "com"},
|
||||
} {
|
||||
namer.SetClusterName(testCase.newName)
|
||||
if got := namer.GetClusterName(); got != testCase.expected {
|
||||
t.Fatalf("Expected %q got %q", testCase.expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,516 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
go_flag "flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
flag "github.com/spf13/pflag"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
"k8s.io/ingress/controllers/gce/backends"
|
||||
"k8s.io/ingress/controllers/gce/controller"
|
||||
"k8s.io/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/ingress/controllers/gce/storage"
|
||||
"k8s.io/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
)
|
||||
|
||||
// Entrypoint of GLBC. Example invocation:
|
||||
// 1. In a pod:
|
||||
// glbc --delete-all-on-quit
|
||||
// 2. Dry run (on localhost):
|
||||
// $ kubectl proxy --api-prefix="/"
|
||||
// $ glbc --apiserver-host="http://localhost:proxyport" --running-in-cluster=false
|
||||
|
||||
const (
|
||||
// lbAPIPort is the port on which the loadbalancer controller serves a
|
||||
// minimal api (/healthz, /delete-all-and-quit etc).
|
||||
lbAPIPort = 8081
|
||||
|
||||
// A delimiter used for clarity in naming GCE resources.
|
||||
clusterNameDelimiter = "--"
|
||||
|
||||
// Arbitrarily chosen alphanumeric character to use in constructing resource
|
||||
// names, eg: to avoid cases where we end up with a name ending in '-'.
|
||||
alphaNumericChar = "0"
|
||||
|
||||
// Current docker image version. Only used in debug logging.
|
||||
imageVersion = "glbc:0.9.6"
|
||||
|
||||
// Key used to persist UIDs to configmaps.
|
||||
uidConfigMapName = "ingress-uid"
|
||||
|
||||
// Sleep interval to retry cloud client creation.
|
||||
cloudClientRetryInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
flags = flag.NewFlagSet(
|
||||
`glbc: glbc --running-in-cluster=false`,
|
||||
flag.ExitOnError)
|
||||
|
||||
clusterName = flags.String("cluster-uid", controller.DefaultClusterUID,
|
||||
`Optional, used to tag cluster wide, shared loadbalancer resources such
|
||||
as instance groups. Use this flag if you'd like to continue using the
|
||||
same resources across a pod restart. Note that this does not need to
|
||||
match the name of your Kubernetes cluster, it's just an arbitrary name
|
||||
used to tag/lookup cloud resources.`)
|
||||
|
||||
inCluster = flags.Bool("running-in-cluster", true,
|
||||
`Optional, if this controller is running in a kubernetes cluster, use the
|
||||
pod secrets for creating a Kubernetes client.`)
|
||||
|
||||
apiServerHost = flags.String("apiserver-host", "", "The address of the Kubernetes Apiserver "+
|
||||
"to connect to in the format of protocol://address:port, e.g., "+
|
||||
"http://localhost:8080. If not specified, the assumption is that the binary runs inside a "+
|
||||
"Kubernetes cluster and local discovery is attempted.")
|
||||
kubeConfigFile = flags.String("kubeconfig", "", "Path to kubeconfig file with authorization and master location information.")
|
||||
|
||||
// TODO: Consolidate this flag and running-in-cluster. People already use
|
||||
// the first one to mean "running in dev", unfortunately.
|
||||
useRealCloud = flags.Bool("use-real-cloud", false,
|
||||
`Optional, if set a real cloud client is created. Only matters with
|
||||
--running-in-cluster=false, i.e. a real cloud is always used when this
|
||||
controller is running on a Kubernetes node.`)
|
||||
|
||||
resyncPeriod = flags.Duration("sync-period", 30*time.Second,
|
||||
`Relist and confirm cloud resources this often.`)
|
||||
|
||||
deleteAllOnQuit = flags.Bool("delete-all-on-quit", false,
|
||||
`If true, the controller will delete all Ingress and the associated
|
||||
external cloud resources as it's shutting down. Mostly used for
|
||||
testing. In normal environments the controller should only delete
|
||||
a loadbalancer if the associated Ingress is deleted.`)
|
||||
|
||||
defaultSvc = flags.String("default-backend-service", "kube-system/default-http-backend",
|
||||
`Service used to serve a 404 page for the default backend. Takes the form
|
||||
namespace/name. The controller uses the first node port of this Service for
|
||||
the default backend.`)
|
||||
|
||||
healthCheckPath = flags.String("health-check-path", "/",
|
||||
`Path used to health-check a backend service. All Services must serve
|
||||
a 200 page on this path. Currently this is only configurable globally.`)
|
||||
|
||||
watchNamespace = flags.String("watch-namespace", v1.NamespaceAll,
|
||||
`Namespace to watch for Ingress/Services/Endpoints.`)
|
||||
|
||||
verbose = flags.Bool("verbose", false,
|
||||
`If true, logs are displayed at V(4), otherwise V(2).`)
|
||||
|
||||
configFilePath = flags.String("config-file-path", "",
|
||||
`Path to a file containing the gce config. If left unspecified this
|
||||
controller only works with default zones.`)
|
||||
|
||||
healthzPort = flags.Int("healthz-port", lbAPIPort,
|
||||
`Port to run healthz server. Must match the health check port in yaml.`)
|
||||
)
|
||||
|
||||
func registerHandlers(lbc *controller.LoadBalancerController) {
|
||||
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := lbc.CloudClusterManager.IsHealthy(); err != nil {
|
||||
w.WriteHeader(500)
|
||||
w.Write([]byte(fmt.Sprintf("Cluster unhealthy: %v", err)))
|
||||
return
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
w.Write([]byte("ok"))
|
||||
})
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
http.HandleFunc("/delete-all-and-quit", func(w http.ResponseWriter, r *http.Request) {
|
||||
// TODO: Retry failures during shutdown.
|
||||
lbc.Stop(true)
|
||||
})
|
||||
|
||||
glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", *healthzPort), nil))
|
||||
}
|
||||
|
||||
func handleSigterm(lbc *controller.LoadBalancerController, deleteAll bool) {
|
||||
// Multiple SIGTERMs will get dropped
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, syscall.SIGTERM)
|
||||
<-signalChan
|
||||
glog.Infof("Received SIGTERM, shutting down")
|
||||
|
||||
// TODO: Better retries than relying on restartPolicy.
|
||||
exitCode := 0
|
||||
if err := lbc.Stop(deleteAll); err != nil {
|
||||
glog.Infof("Error during shutdown %v", err)
|
||||
exitCode = 1
|
||||
}
|
||||
glog.Infof("Exiting with %v", exitCode)
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
// main function for GLBC.
|
||||
func main() {
|
||||
// TODO: Add a healthz endpoint
|
||||
var err error
|
||||
var clusterManager *controller.ClusterManager
|
||||
|
||||
// TODO: We can simply parse all go flags with
|
||||
// flags.AddGoFlagSet(go_flag.CommandLine)
|
||||
// but that pollutes --help output with a ton of standard go flags.
|
||||
// We only really need a binary switch from light, v(2) logging to
|
||||
// heavier debug style V(4) logging, which we use --verbose for.
|
||||
flags.Parse(os.Args)
|
||||
|
||||
// Set glog verbosity levels, unconditionally set --alsologtostderr.
|
||||
go_flag.Lookup("logtostderr").Value.Set("true")
|
||||
if *verbose {
|
||||
go_flag.Set("v", "4")
|
||||
}
|
||||
glog.Infof("Starting GLBC image: %v, cluster name %v", imageVersion, *clusterName)
|
||||
if *defaultSvc == "" {
|
||||
glog.Fatalf("Please specify --default-backend")
|
||||
}
|
||||
|
||||
var config *rest.Config
|
||||
// Create kubeclient
|
||||
if *inCluster {
|
||||
if config, err = rest.InClusterConfig(); err != nil {
|
||||
glog.Fatalf("error creating client configuration: %v", err)
|
||||
}
|
||||
} else {
|
||||
if *apiServerHost == "" {
|
||||
glog.Fatalf("please specify the api server address using the flag --apiserver-host")
|
||||
}
|
||||
|
||||
config, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
|
||||
&clientcmd.ClientConfigLoadingRules{ExplicitPath: *kubeConfigFile},
|
||||
&clientcmd.ConfigOverrides{
|
||||
ClusterInfo: clientcmdapi.Cluster{
|
||||
Server: *apiServerHost,
|
||||
},
|
||||
}).ClientConfig()
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating client configuration: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
kubeClient, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to create client: %v.", err)
|
||||
}
|
||||
|
||||
// Wait for the default backend Service. There's no pretty way to do this.
|
||||
parts := strings.Split(*defaultSvc, "/")
|
||||
if len(parts) != 2 {
|
||||
glog.Fatalf("Default backend should take the form namespace/name: %v",
|
||||
*defaultSvc)
|
||||
}
|
||||
port, nodePort, err := getNodePort(kubeClient, parts[0], parts[1])
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not configure default backend %v: %v",
|
||||
*defaultSvc, err)
|
||||
}
|
||||
// The default backend is known to be HTTP
|
||||
defaultBackendNodePort := backends.ServicePort{
|
||||
Port: int64(nodePort),
|
||||
Protocol: utils.ProtocolHTTP,
|
||||
SvcName: types.NamespacedName{Namespace: parts[0], Name: parts[1]},
|
||||
SvcPort: intstr.FromInt(int(port)),
|
||||
}
|
||||
|
||||
var cloud *gce.GCECloud
|
||||
if *inCluster || *useRealCloud {
|
||||
// Create cluster manager
|
||||
namer, err := newNamer(kubeClient, *clusterName, controller.DefaultFirewallName)
|
||||
if err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
// TODO: Make this more resilient. Currently we create the cloud client
|
||||
// and pass it through to all the pools. This makes unit testing easier.
|
||||
// However if the cloud client suddenly fails, we should try to re-create it
|
||||
// and continue.
|
||||
if *configFilePath != "" {
|
||||
glog.Infof("Reading config from path %v", configFilePath)
|
||||
config, err := os.Open(*configFilePath)
|
||||
if err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
defer config.Close()
|
||||
cloud = getGCEClient(config)
|
||||
glog.Infof("Successfully loaded cloudprovider using config %q", configFilePath)
|
||||
} else {
|
||||
// While you might be tempted to refactor so we simply assign nil to the
|
||||
// config and only invoke getGCEClient once, that will not do the right
|
||||
// thing, because an interface value holding a nil pointer is not itself nil in Go.
|
||||
cloud = getGCEClient(nil)
|
||||
glog.Infof("Created GCE client without a config file")
|
||||
}
|
||||
|
||||
clusterManager, err = controller.NewClusterManager(cloud, namer, defaultBackendNodePort, *healthCheckPath)
|
||||
if err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
} else {
|
||||
// Create fake cluster manager
|
||||
clusterManager = controller.NewFakeClusterManager(*clusterName, controller.DefaultFirewallName).ClusterManager
|
||||
}
|
||||
|
||||
ctx := controller.NewControllerContext(kubeClient, *watchNamespace, *resyncPeriod)
|
||||
|
||||
// Start loadbalancer controller
|
||||
lbc, err := controller.NewLoadBalancerController(kubeClient, ctx, clusterManager)
|
||||
if err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
if clusterManager.ClusterNamer.GetClusterName() != "" {
|
||||
glog.V(3).Infof("Cluster name %+v", clusterManager.ClusterNamer.GetClusterName())
|
||||
}
|
||||
clusterManager.Init(&controller.GCETranslator{LoadBalancerController: lbc})
|
||||
go registerHandlers(lbc)
|
||||
go handleSigterm(lbc, *deleteAllOnQuit)
|
||||
|
||||
ctx.Start()
|
||||
lbc.Run()
|
||||
for {
|
||||
glog.Infof("Handled quit, awaiting pod deletion.")
|
||||
time.Sleep(30 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func newNamer(kubeClient kubernetes.Interface, clusterName string, fwName string) (*utils.Namer, error) {
|
||||
name, err := getClusterUID(kubeClient, clusterName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fw_name, err := getFirewallName(kubeClient, fwName, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
namer := utils.NewNamer(name, fw_name)
|
||||
uidVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
|
||||
|
||||
// Start a goroutine to poll the cluster UID config map
|
||||
// We don't watch because we know exactly which configmap we want and this
|
||||
// controller already watches 5 other resources, so it isn't worth the cost
|
||||
// of another connection and complexity.
|
||||
go wait.Forever(func() {
|
||||
for _, key := range [...]string{storage.UidDataKey, storage.ProviderDataKey} {
|
||||
val, found, err := uidVault.Get(key)
|
||||
if err != nil {
|
||||
glog.Errorf("Can't read uidConfigMap %v", uidConfigMapName)
|
||||
} else if !found {
|
||||
errmsg := fmt.Sprintf("Can't read %v from uidConfigMap %v", key, uidConfigMapName)
|
||||
if key == storage.UidDataKey {
|
||||
glog.Errorf(errmsg)
|
||||
} else {
|
||||
glog.V(4).Infof(errmsg)
|
||||
}
|
||||
} else {
|
||||
|
||||
switch key {
|
||||
case storage.UidDataKey:
|
||||
if uid := namer.GetClusterName(); uid != val {
|
||||
glog.Infof("Cluster uid changed from %v -> %v", uid, val)
|
||||
namer.SetClusterName(val)
|
||||
}
|
||||
case storage.ProviderDataKey:
|
||||
if fw_name := namer.GetFirewallName(); fw_name != val {
|
||||
glog.Infof("Cluster firewall name changed from %v -> %v", fw_name, val)
|
||||
namer.SetFirewallName(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 5*time.Second)
|
||||
return namer, nil
|
||||
}
|
||||
|
||||
// useDefaultOrLookupVault returns either a 'default_name' or if unset, obtains a name from a ConfigMap.
|
||||
// The returned value follows this priority:
|
||||
// If the provided 'default_name' is not empty, that name is used.
|
||||
// This is effectively a client override via a command line flag.
|
||||
// else, check cfgVault with 'cm_key' as a key and if found, use the associated value
|
||||
// else, return an empty 'name' and pass along an error iff the configmap lookup is erroneous.
|
||||
func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_name string) (string, error) {
|
||||
if default_name != "" {
|
||||
glog.Infof("Using user provided %v %v", cm_key, default_name)
|
||||
// Don't save the uid in the vault, so users can rollback through
|
||||
// setting the accompanying flag to ""
|
||||
return default_name, nil
|
||||
}
|
||||
val, found, err := cfgVault.Get(cm_key)
|
||||
if err != nil {
|
||||
// This can fail because of:
|
||||
// 1. No such config map - found=false, err=nil
|
||||
// 2. No such key in config map - found=false, err=nil
|
||||
// 3. Apiserver flake - found=false, err!=nil
|
||||
// It is not safe to proceed in 3.
|
||||
return "", fmt.Errorf("failed to retrieve %v: %v, returning empty name", cm_key, err)
|
||||
} else if !found {
|
||||
// Not found but safe to proceed.
|
||||
return "", nil
|
||||
}
|
||||
glog.Infof("Using %v = %q saved in ConfigMap", cm_key, val)
|
||||
return val, nil
|
||||
}
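As a hedged illustration of that priority order, the following hypothetical helper (not in the original file, written only with identifiers already defined in this file) shows what each outcome means to a caller such as getClusterUID below:

// resolveUIDExample is illustrative only. A non-empty flag value bypasses the
// vault; "nothing stored" comes back as ("", nil) and the caller is expected
// to allocate and persist a fresh value; a non-nil error means the ConfigMap
// lookup itself failed.
func resolveUIDExample(kubeClient kubernetes.Interface, flagValue string) (string, error) {
	cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
	uid, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, flagValue)
	if err != nil {
		return "", err // apiserver flake: not safe to proceed
	}
	if uid == "" {
		// Nothing stored and no override; getClusterUID would mint a new UID
		// here and record it with cfgVault.Put(storage.UidDataKey, uid).
		return "", nil
	}
	return uid, nil // flag override or previously stored value
}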
|
||||
|
||||
// getFirewallName returns the firewall rule name to use for this cluster. For
|
||||
// backwards compatibility, the firewall name will default to the cluster UID.
|
||||
// Use useDefaultOrLookupVault to obtain a stored or overridden value for the firewall name.
|
||||
// else, use the cluster UID as a backup (this retains backwards compatibility).
|
||||
func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) (string, error) {
|
||||
cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
|
||||
if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
|
||||
return "", err
|
||||
} else if fw_name != "" {
|
||||
return fw_name, cfgVault.Put(storage.ProviderDataKey, fw_name)
|
||||
} else {
|
||||
glog.Infof("Using cluster UID %v as firewall name", cluster_uid)
|
||||
return cluster_uid, cfgVault.Put(storage.ProviderDataKey, cluster_uid)
|
||||
}
|
||||
}
|
||||
|
||||
// getClusterUID returns the cluster UID. Rules for UID generation:
|
||||
// If the user specifies a --cluster-uid param it overwrites everything
|
||||
// else, check UID config map for a previously recorded uid
|
||||
// else, check if there are any working Ingresses
|
||||
// - remember that "" is the cluster uid
|
||||
// else, allocate a new uid
|
||||
func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) {
|
||||
cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
|
||||
if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
|
||||
return "", err
|
||||
} else if name != "" {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// Check if the cluster has an Ingress with ip
|
||||
ings, err := kubeClient.Extensions().Ingresses(metav1.NamespaceAll).List(metav1.ListOptions{
|
||||
LabelSelector: labels.Everything().String(),
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
namer := utils.Namer{}
|
||||
for _, ing := range ings.Items {
|
||||
if len(ing.Status.LoadBalancer.Ingress) != 0 {
|
||||
c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
|
||||
if c.ClusterName != "" {
|
||||
return c.ClusterName, cfgVault.Put(storage.UidDataKey, c.ClusterName)
|
||||
}
|
||||
glog.Infof("Found a working Ingress, assuming uid is empty string")
|
||||
return "", cfgVault.Put(storage.UidDataKey, "")
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate new uid
|
||||
f, err := os.Open("/dev/urandom")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
b := make([]byte, 8)
|
||||
if _, err := f.Read(b); err != nil {
|
||||
return "", err
|
||||
}
|
||||
uid := fmt.Sprintf("%x", b)
|
||||
return uid, cfgVault.Put(storage.UidDataKey, uid)
|
||||
}
|
||||
|
||||
// getNodePort waits for the Service, and returns its first node port.
|
||||
func getNodePort(client kubernetes.Interface, ns, name string) (port, nodePort int32, err error) {
|
||||
var svc *v1.Service
|
||||
glog.V(3).Infof("Waiting for %v/%v", ns, name)
|
||||
wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
|
||||
svc, err = client.Core().Services(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
for _, p := range svc.Spec.Ports {
|
||||
if p.NodePort != 0 {
|
||||
port = p.Port
|
||||
nodePort = p.NodePort
|
||||
glog.V(3).Infof("Node port %v", nodePort)
|
||||
break
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func getGCEClient(config io.Reader) *gce.GCECloud {
|
||||
getConfigReader := func() io.Reader { return nil }
|
||||
|
||||
if config != nil {
|
||||
allConfig, err := ioutil.ReadAll(config)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error while reading entire config: %v", err)
|
||||
}
|
||||
glog.V(2).Infof("Using cloudprovider config file:\n%v ", string(allConfig))
|
||||
|
||||
getConfigReader = func() io.Reader {
|
||||
return bytes.NewReader(allConfig)
|
||||
}
|
||||
} else {
|
||||
glog.V(2).Infoln("No cloudprovider config file provided. Continuing with default values.")
|
||||
}
|
||||
|
||||
// Creating the cloud interface involves resolving the metadata server to get
|
||||
// an oauth token. If this fails, the token provider assumes it's not on GCE.
|
||||
// No errors are thrown. So we need to keep retrying till it works because
|
||||
// we know we're on GCE.
|
||||
for {
|
||||
cloudInterface, err := cloudprovider.GetCloudProvider("gce", getConfigReader())
|
||||
if err == nil {
|
||||
cloud := cloudInterface.(*gce.GCECloud)
|
||||
|
||||
// If this controller is scheduled on a node without compute/rw
|
||||
// it won't be allowed to list backends. We can assume that the
|
||||
// user has no need for Ingress in this case. If they grant
|
||||
// permissions to the node they will have to restart the controller
|
||||
// manually to re-create the client.
|
||||
if _, err = cloud.ListGlobalBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {
|
||||
return cloud
|
||||
}
|
||||
glog.Warningf("Failed to list backend services, retrying: %v", err)
|
||||
} else {
|
||||
glog.Warningf("Failed to retrieve cloud interface, retrying: %v", err)
|
||||
}
|
||||
time.Sleep(cloudClientRetryInterval)
|
||||
}
|
||||
}
|
|
@ -1,83 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
# This must match the --default-backend-service argument of the l7 lb
|
||||
# controller and is required because GCE mandates a default backend.
|
||||
name: default-http-backend
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
spec:
|
||||
# The default backend must be of type NodePort.
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
k8s-app: glbc
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: l7-lb-controller
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.9.6
|
||||
spec:
|
||||
# There should never be more than 1 controller alive simultaneously.
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: glbc
|
||||
version: v0.9.6
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.9.6
|
||||
name: glbc
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 600
|
||||
containers:
|
||||
- name: default-http-backend
|
||||
# Any image is permissible as long as:
|
||||
# 1. It serves a 404 page at /
|
||||
# 2. It serves 200 on a /healthz endpoint
|
||||
image: gcr.io/google_containers/defaultbackend:1.0
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- image: gcr.io/google_containers/glbc:0.9.6
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
name: l7-lb-controller
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
args:
|
||||
- --apiserver-host=http://localhost:8080
|
||||
- --default-backend-service=default/default-http-backend
|
||||
- --sync-period=300s
|
|
@ -1,199 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
api_v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
const (
|
||||
// UidDataKey is the key used in config maps to store the UID.
|
||||
UidDataKey = "uid"
|
||||
// ProviderDataKey is the key used in config maps to store the Provider
|
||||
// UID which we use to ensure unique firewalls.
|
||||
ProviderDataKey = "provider-uid"
|
||||
)
|
||||
|
||||
// ConfigMapVault stores cluster UIDs in config maps.
|
||||
// It's a layer on top of ConfigMapStore that just implements the utils.uidVault
|
||||
// interface.
|
||||
type ConfigMapVault struct {
|
||||
storeLock sync.Mutex
|
||||
ConfigMapStore cache.Store
|
||||
namespace string
|
||||
name string
|
||||
}
|
||||
|
||||
// Get retrieves the value associated to the provided 'key' from the cluster config map.
|
||||
// If this method returns an error, it's guaranteed to be an apiserver flake.
|
||||
// If the error is a not found error it sets the boolean to false and
|
||||
// returns a nil error instead.
|
||||
func (c *ConfigMapVault) Get(key string) (string, bool, error) {
|
||||
keyStore := fmt.Sprintf("%v/%v", c.namespace, c.name)
|
||||
item, found, err := c.ConfigMapStore.GetByKey(keyStore)
|
||||
if err != nil || !found {
|
||||
return "", false, err
|
||||
}
|
||||
data := item.(*api_v1.ConfigMap).Data
|
||||
c.storeLock.Lock()
|
||||
defer c.storeLock.Unlock()
|
||||
if k, ok := data[key]; ok {
|
||||
return k, true, nil
|
||||
}
|
||||
glog.Infof("Found config map %v but it doesn't contain key %v: %+v", keyStore, key, data)
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
// Put inserts a key/value pair in the cluster config map.
|
||||
// If the key already exists, the value provided is stored.
|
||||
func (c *ConfigMapVault) Put(key, val string) error {
|
||||
c.storeLock.Lock()
|
||||
defer c.storeLock.Unlock()
|
||||
apiObj := &api_v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: c.name,
|
||||
Namespace: c.namespace,
|
||||
},
|
||||
}
|
||||
cfgMapKey := fmt.Sprintf("%v/%v", c.namespace, c.name)
|
||||
|
||||
item, exists, err := c.ConfigMapStore.GetByKey(cfgMapKey)
|
||||
if err == nil && exists {
|
||||
data := item.(*api_v1.ConfigMap).Data
|
||||
existingVal, ok := data[key]
|
||||
if ok && existingVal == val {
|
||||
// duplicate, no need to update.
|
||||
return nil
|
||||
}
|
||||
data[key] = val
|
||||
apiObj.Data = data
|
||||
if existingVal != val {
|
||||
glog.Infof("Configmap %v has key %v but wrong value %v, updating to %v", cfgMapKey, key, existingVal, val)
|
||||
} else {
|
||||
glog.Infof("Configmap %v will be updated with %v = %v", cfgMapKey, key, val)
|
||||
}
|
||||
if err := c.ConfigMapStore.Update(apiObj); err != nil {
|
||||
return fmt.Errorf("failed to update %v: %v", cfgMapKey, err)
|
||||
}
|
||||
} else {
|
||||
apiObj.Data = map[string]string{key: val}
|
||||
if err := c.ConfigMapStore.Add(apiObj); err != nil {
|
||||
return fmt.Errorf("failed to add %v: %v", cfgMapKey, err)
|
||||
}
|
||||
}
|
||||
glog.Infof("Successfully stored key %v = %v in config map %v", key, val, cfgMapKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes the ConfigMapStore.
|
||||
func (c *ConfigMapVault) Delete() error {
|
||||
cfgMapKey := fmt.Sprintf("%v/%v", c.namespace, c.name)
|
||||
item, _, err := c.ConfigMapStore.GetByKey(cfgMapKey)
|
||||
if err == nil {
|
||||
return c.ConfigMapStore.Delete(item)
|
||||
}
|
||||
glog.Warningf("Couldn't find item %v in vault, unable to delete", cfgMapKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewConfigMapVault creates a config map client.
|
||||
// This client is essentially meant to abstract out the details of
|
||||
// configmaps and the API, and just store/retrieve a single value, the cluster uid.
|
||||
func NewConfigMapVault(c kubernetes.Interface, uidNs, uidConfigMapName string) *ConfigMapVault {
|
||||
return &ConfigMapVault{
|
||||
ConfigMapStore: NewConfigMapStore(c),
|
||||
namespace: uidNs,
|
||||
name: uidConfigMapName}
|
||||
}
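// demoConfigMapVault is an illustrative sketch (not part of the original file):
// it shows how a controller can persist and read back its cluster UID through
// the vault. The namespace, ConfigMap name and UID value are examples only.
func demoConfigMapVault(kubeClient kubernetes.Interface) {
	vault := NewConfigMapVault(kubeClient, "kube-system", "ingress-uid")

	if uid, found, err := vault.Get(UidDataKey); err != nil {
		// Per the comment on Get, an error here is an apiserver flake; retry later.
		glog.Warningf("transient error reading UID: %v", err)
	} else if !found {
		if err := vault.Put(UidDataKey, "cluster-1"); err != nil {
			glog.Errorf("failed to store UID: %v", err)
		}
	} else {
		glog.Infof("existing cluster UID: %v", uid)
	}
}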
|
||||
|
||||
// NewFakeConfigMapVault is an implementation of the ConfigMapStore that doesn't
|
||||
// persist configmaps. Only used in testing.
|
||||
func NewFakeConfigMapVault(ns, name string) *ConfigMapVault {
|
||||
return &ConfigMapVault{
|
||||
ConfigMapStore: cache.NewStore(cache.MetaNamespaceKeyFunc),
|
||||
namespace: ns,
|
||||
name: name}
|
||||
}
|
||||
|
||||
// ConfigMapStore wraps the store interface. Implementations usually persist
|
||||
// contents of the store transparently.
|
||||
type ConfigMapStore interface {
|
||||
cache.Store
|
||||
}
|
||||
|
||||
// APIServerConfigMapStore only services Add and GetByKey from apiserver.
|
||||
// TODO: Implement all the other store methods and make this a write
|
||||
// through cache.
|
||||
type APIServerConfigMapStore struct {
|
||||
ConfigMapStore
|
||||
client kubernetes.Interface
|
||||
}
|
||||
|
||||
// Add adds the given config map to the apiserver's store.
|
||||
func (a *APIServerConfigMapStore) Add(obj interface{}) error {
|
||||
cfg := obj.(*api_v1.ConfigMap)
|
||||
_, err := a.client.Core().ConfigMaps(cfg.Namespace).Create(cfg)
|
||||
return err
|
||||
}
|
||||
|
||||
// Update updates the existing config map object.
|
||||
func (a *APIServerConfigMapStore) Update(obj interface{}) error {
|
||||
cfg := obj.(*api_v1.ConfigMap)
|
||||
_, err := a.client.Core().ConfigMaps(cfg.Namespace).Update(cfg)
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete deletes the existing config map object.
|
||||
func (a *APIServerConfigMapStore) Delete(obj interface{}) error {
|
||||
cfg := obj.(*api_v1.ConfigMap)
|
||||
return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
// GetByKey returns the config map for a given key.
|
||||
// The key must take the form namespace/name.
|
||||
func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists bool, err error) {
|
||||
nsName := strings.Split(key, "/")
|
||||
if len(nsName) != 2 {
|
||||
return nil, false, fmt.Errorf("failed to get key %v, unexpecte format, expecting ns/name", key)
|
||||
}
|
||||
ns, name := nsName[0], nsName[1]
|
||||
cfg, err := a.client.Core().ConfigMaps(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
// Translate not found errors to found=false, err=nil
|
||||
if errors.IsNotFound(err) {
|
||||
return nil, false, nil
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return cfg, true, nil
|
||||
}
|
||||
|
||||
// NewConfigMapStore returns a config map store capable of persisting updates
|
||||
// to apiserver.
|
||||
func NewConfigMapStore(c kubernetes.Interface) ConfigMapStore {
|
||||
return &APIServerConfigMapStore{ConfigMapStore: cache.NewStore(cache.MetaNamespaceKeyFunc), client: c}
|
||||
}
|
|
@ -1,74 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
api "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestConfigMapUID(t *testing.T) {
|
||||
vault := NewFakeConfigMapVault(api.NamespaceSystem, "ingress-uid")
|
||||
// Get value from an empty vault.
|
||||
val, exists, err := vault.Get(UidDataKey)
|
||||
if exists {
|
||||
t.Errorf("Got value from an empty vault")
|
||||
}
|
||||
|
||||
// Store empty value for UidDataKey.
|
||||
uid := ""
|
||||
vault.Put(UidDataKey, uid)
|
||||
val, exists, err = vault.Get(UidDataKey)
|
||||
if !exists || err != nil {
|
||||
t.Errorf("Failed to retrieve value from vault: %v", err)
|
||||
}
|
||||
if val != "" {
|
||||
t.Errorf("Failed to store empty string as a key in the vault")
|
||||
}
|
||||
|
||||
// Store actual value in key.
|
||||
storedVal := "newuid"
|
||||
vault.Put(UidDataKey, storedVal)
|
||||
val, exists, err = vault.Get(UidDataKey)
|
||||
if !exists || err != nil {
|
||||
t.Errorf("Failed to retrieve value from vault")
|
||||
} else if val != storedVal {
|
||||
t.Errorf("Failed to store empty string as a key in the vault")
|
||||
}
|
||||
|
||||
// Store a second value, which will have the effect of updating the Store
|
||||
// rather than adding.
|
||||
secondVal := "bar"
|
||||
vault.Put("foo", secondVal)
|
||||
val, exists, err = vault.Get("foo")
|
||||
if !exists || err != nil || val != secondVal {
|
||||
t.Errorf("Failed to retrieve second value from vault")
|
||||
}
|
||||
val, exists, err = vault.Get(UidDataKey)
|
||||
if !exists || err != nil || val != storedVal {
|
||||
t.Errorf("Failed to retrieve first value from vault")
|
||||
}
|
||||
|
||||
// Delete value.
|
||||
if err := vault.Delete(); err != nil {
|
||||
t.Errorf("Failed to delete uid %v", err)
|
||||
}
|
||||
if _, exists, _ := vault.Get(UidDataKey); exists {
|
||||
t.Errorf("Found uid but expected none after deletion")
|
||||
}
|
||||
}
|
|
@ -1,30 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Storage backends used by the Ingress controller.
// Ingress controllers require their own storage for the following reasons:
// 1. There is only so much information we can pack into 64 chars allowed
//    by GCE for resource names.
// 2. An Ingress controller cannot assume total control over a project, in
//    fact in a majority of cases (ubernetes, tests, multiple gke clusters in
//    same project) there *will* be multiple controllers in a project.
// 3. If the Ingress controller pod is killed, an Ingress is deleted while
//    the pod is down, and then the controller is re-scheduled on another node,
//    it will leak resources. Note that this will happen today because
//    the only implemented storage backend is InMemoryPool.
// 4. Listing from cloudproviders is really slow.

package storage
|
|
@ -1,146 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// Snapshotter is an interface capable of providing a consistent snapshot of
|
||||
// the underlying storage implementation of a pool. It does not guarantee
|
||||
// thread safety of snapshots, so they should be treated as read only unless
|
||||
// the implementation specifies otherwise.
|
||||
type Snapshotter interface {
|
||||
Snapshot() map[string]interface{}
|
||||
cache.ThreadSafeStore
|
||||
}
|
||||
|
||||
// InMemoryPool is used as a cache for cluster resource pools.
|
||||
type InMemoryPool struct {
|
||||
cache.ThreadSafeStore
|
||||
}
|
||||
|
||||
// Snapshot returns a read only copy of the k:v pairs in the store.
|
||||
// Caller beware: Violates traditional snapshot guarantees.
|
||||
func (p *InMemoryPool) Snapshot() map[string]interface{} {
|
||||
snap := map[string]interface{}{}
|
||||
for _, key := range p.ListKeys() {
|
||||
if item, ok := p.Get(key); ok {
|
||||
snap[key] = item
|
||||
}
|
||||
}
|
||||
return snap
|
||||
}
|
||||
|
||||
// NewInMemoryPool creates an InMemoryPool.
|
||||
func NewInMemoryPool() *InMemoryPool {
|
||||
return &InMemoryPool{
|
||||
cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{})}
|
||||
}
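// demoInMemoryPool is an illustrative sketch (not part of the original file):
// the pool is a thread-safe key/value store and Snapshot returns a
// point-in-time copy for read-only iteration. Keys and values are examples.
func demoInMemoryPool() {
	pool := NewInMemoryPool()
	pool.Add("k8s-be-30001--uid1", "backend-a")
	pool.Add("k8s-ig--uid1", "instance-group")

	for key, item := range pool.Snapshot() {
		glog.V(4).Infof("pool entry %v = %v", key, item)
	}
}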
|
||||
|
||||
type keyFunc func(interface{}) (string, error)
|
||||
|
||||
type cloudLister interface {
|
||||
List() ([]interface{}, error)
|
||||
}
|
||||
|
||||
// CloudListingPool wraps InMemoryPool but relists from the cloud periodically.
|
||||
type CloudListingPool struct {
|
||||
// A lock to protect against concurrent mutation of the pool
|
||||
lock sync.Mutex
|
||||
// The pool that is re-populated via re-list from cloud, and written to
|
||||
// from controller
|
||||
*InMemoryPool
|
||||
// An interface that lists objects from the cloud.
|
||||
lister cloudLister
|
||||
// A function capable of producing a key for a given object.
|
||||
// This key must match the key used to store the same object in the user of
|
||||
// this cache.
|
||||
keyGetter keyFunc
|
||||
}
|
||||
|
||||
// ReplenishPool lists through the cloudLister and inserts into the pool. This
|
||||
// is especially useful in scenarios like deleting an Ingress while the
|
||||
// controller is restarting. As long as the resource exists in the shared
|
||||
// memory pool, it is visible to the caller and they can take corrective
|
||||
// actions, eg: backend pool deletes backends with non-matching node ports
|
||||
// in its sync method.
|
||||
func (c *CloudListingPool) ReplenishPool() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
glog.V(4).Infof("Replenishing pool")
|
||||
|
||||
// We must list with the lock, because the controller also lists through
|
||||
// Snapshot(). It's ok if the controller takes a snapshot, we list, we
|
||||
// delete, because we have delete based on the most recent state. Worst
|
||||
// case we thrash. It's not ok if we list, the controller lists and
|
||||
// creates a backend, and we delete that backend based on stale state.
|
||||
items, err := c.lister.List()
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to list: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for i := range items {
|
||||
key, err := c.keyGetter(items[i])
|
||||
if err != nil {
|
||||
glog.V(5).Infof("CloudListingPool: %v", err)
|
||||
continue
|
||||
}
|
||||
c.InMemoryPool.Add(key, items[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Snapshot just snapshots the underlying pool.
|
||||
func (c *CloudListingPool) Snapshot() map[string]interface{} {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.InMemoryPool.Snapshot()
|
||||
}
|
||||
|
||||
// Add simply adds to the underlying pool.
|
||||
func (c *CloudListingPool) Add(key string, obj interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.InMemoryPool.Add(key, obj)
|
||||
}
|
||||
|
||||
// Delete just deletes from underlying pool.
|
||||
func (c *CloudListingPool) Delete(key string) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.InMemoryPool.Delete(key)
|
||||
}
|
||||
|
||||
// NewCloudListingPool replenishes the InMemoryPool through a background
|
||||
// goroutine that lists from the given cloudLister.
|
||||
func NewCloudListingPool(k keyFunc, lister cloudLister, relistPeriod time.Duration) *CloudListingPool {
|
||||
cl := &CloudListingPool{
|
||||
InMemoryPool: NewInMemoryPool(),
|
||||
lister: lister,
|
||||
keyGetter: k,
|
||||
}
|
||||
glog.V(4).Infof("Starting pool replenish goroutine")
|
||||
go wait.Until(cl.ReplenishPool, relistPeriod, make(chan struct{}))
|
||||
return cl
|
||||
}
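// The following is an illustrative sketch (not part of the original file) of
// how a CloudListingPool is wired up. fakeBackendLister and the 30 second
// relist period are examples; real callers pass a lister backed by the cloud API.
type fakeBackendLister struct{}

func (f *fakeBackendLister) List() ([]interface{}, error) {
	// A real lister would call the cloud API here.
	return []interface{}{"k8s-be-30001--uid1"}, nil
}

func demoCloudListingPool() {
	pool := NewCloudListingPool(
		func(obj interface{}) (string, error) { return obj.(string), nil },
		&fakeBackendLister{},
		30*time.Second,
	)
	// Snapshot is a read-only view; ReplenishPool keeps it in sync in the background.
	glog.V(4).Infof("initial snapshot: %v", pool.Snapshot())
}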
|
|
@ -1,361 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
const (
|
||||
// Add used to record additions in a sync pool.
|
||||
Add = iota
|
||||
// Remove used to record removals from a sync pool.
|
||||
Remove
|
||||
// Sync used to record syncs of a sync pool.
|
||||
Sync
|
||||
// Get used to record Get from a sync pool.
|
||||
Get
|
||||
// Create used to record creations in a sync pool.
|
||||
Create
|
||||
// Update used to record updates in a sync pool.
|
||||
Update
|
||||
// Delete used to record deltions from a sync pool.
|
||||
Delete
|
||||
// AddInstances used to record a call to AddInstances.
|
||||
AddInstances
|
||||
// RemoveInstances used to record a call to RemoveInstances.
|
||||
RemoveInstances
|
||||
|
||||
// This allows sharing of backends across loadbalancers.
|
||||
backendPrefix = "k8s-be"
|
||||
backendRegex = "k8s-be-([0-9]+).*"
|
||||
|
||||
// Prefix used for instance groups involved in L7 balancing.
|
||||
igPrefix = "k8s-ig"
|
||||
|
||||
// Suffix used in the l7 firewall rule. There is currently only one.
|
||||
// Note that this name is used by the cloudprovider lib that inserts its
|
||||
// own k8s-fw prefix.
|
||||
globalFirewallSuffix = "l7"
|
||||
|
||||
// A delimiter used for clarity in naming GCE resources.
|
||||
clusterNameDelimiter = "--"
|
||||
|
||||
// Arbitrarily chosen alphanumeric character to use in constructing resource
|
||||
// names, eg: to avoid cases where we end up with a name ending in '-'.
|
||||
alphaNumericChar = "0"
|
||||
|
||||
// Names longer than this are truncated, because of GCE restrictions.
|
||||
nameLenLimit = 62
|
||||
|
||||
// DefaultBackendKey is the key used to transmit the defaultBackend through
|
||||
// a urlmap. It's not a valid subdomain, and it is a catch all path.
|
||||
// TODO: Find a better way to transmit this, once we've decided on default
|
||||
// backend semantics (i.e do we want a default per host, per lb etc).
|
||||
DefaultBackendKey = "DefaultBackend"
|
||||
|
||||
// K8sAnnotationPrefix is the prefix used in annotations used to record
|
||||
// debug information in the Ingress annotations.
|
||||
K8sAnnotationPrefix = "ingress.kubernetes.io"
|
||||
|
||||
// ProtocolHTTP protocol for a service
|
||||
ProtocolHTTP AppProtocol = "HTTP"
|
||||
// ProtocolHTTPS protocol for a service
|
||||
ProtocolHTTPS AppProtocol = "HTTPS"
|
||||
)
|
||||
|
||||
type AppProtocol string
|
||||
|
||||
// Namer handles centralized naming for the cluster.
|
||||
type Namer struct {
|
||||
clusterName string
|
||||
firewallName string
|
||||
nameLock sync.Mutex
|
||||
}
|
||||
|
||||
// NewNamer creates a new namer with a Cluster and Firewall name.
|
||||
func NewNamer(clusterName, firewallName string) *Namer {
|
||||
namer := &Namer{}
|
||||
namer.SetClusterName(clusterName)
|
||||
namer.SetFirewallName(firewallName)
|
||||
return namer
|
||||
}
|
||||
|
||||
// NameComponents is a struct representing the components of a GCE resource
|
||||
// name constructed by the namer. The format of such a name is:
|
||||
// k8s-resource-<metadata, eg port>--uid
|
||||
type NameComponents struct {
|
||||
ClusterName, Resource, Metadata string
|
||||
}
|
||||
|
||||
// SetClusterName sets the UID/name of this cluster.
|
||||
func (n *Namer) SetClusterName(name string) {
|
||||
n.nameLock.Lock()
|
||||
defer n.nameLock.Unlock()
|
||||
if strings.Contains(name, clusterNameDelimiter) {
|
||||
tokens := strings.Split(name, clusterNameDelimiter)
|
||||
glog.Warningf("Given name %v contains %v, taking last token in: %+v", name, clusterNameDelimiter, tokens)
|
||||
name = tokens[len(tokens)-1]
|
||||
}
|
||||
glog.Infof("Changing cluster name from %v to %v", n.clusterName, name)
|
||||
n.clusterName = name
|
||||
}
|
||||
|
||||
// SetFirewallName sets the firewall name of this cluster.
|
||||
func (n *Namer) SetFirewallName(firewall_name string) {
|
||||
n.nameLock.Lock()
|
||||
defer n.nameLock.Unlock()
|
||||
if n.firewallName != firewall_name {
|
||||
glog.Infof("Changing firewall name from %v to %v", n.firewallName, firewall_name)
|
||||
n.firewallName = firewall_name
|
||||
}
|
||||
}
|
||||
|
||||
// GetClusterName returns the UID/name of this cluster.
|
||||
func (n *Namer) GetClusterName() string {
|
||||
n.nameLock.Lock()
|
||||
defer n.nameLock.Unlock()
|
||||
return n.clusterName
|
||||
}
|
||||
|
||||
// GetFirewallName returns the firewall name of this cluster.
|
||||
func (n *Namer) GetFirewallName() string {
|
||||
n.nameLock.Lock()
|
||||
defer n.nameLock.Unlock()
|
||||
// Retain backwards compatible behavior where firewallName == clusterName.
|
||||
if n.firewallName == "" {
|
||||
return n.clusterName
|
||||
} else {
|
||||
return n.firewallName
|
||||
}
|
||||
}
|
||||
|
||||
// Truncate truncates the given key to a GCE length limit.
|
||||
func (n *Namer) Truncate(key string) string {
|
||||
if len(key) > nameLenLimit {
|
||||
// GCE requires names to end with an alphanumeric, but allows characters
|
||||
// like '-', so make sure the truncated name ends legally.
|
||||
return fmt.Sprintf("%v%v", key[:nameLenLimit], alphaNumericChar)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func (n *Namer) decorateName(name string) string {
|
||||
clusterName := n.GetClusterName()
|
||||
if clusterName == "" {
|
||||
return name
|
||||
}
|
||||
return n.Truncate(fmt.Sprintf("%v%v%v", name, clusterNameDelimiter, clusterName))
|
||||
}
|
||||
|
||||
// ParseName parses the name of a resource generated by the namer.
|
||||
func (n *Namer) ParseName(name string) *NameComponents {
|
||||
l := strings.Split(name, clusterNameDelimiter)
|
||||
var uid, resource string
|
||||
if len(l) >= 2 {
|
||||
uid = l[len(l)-1]
|
||||
}
|
||||
c := strings.Split(name, "-")
|
||||
if len(c) >= 2 {
|
||||
resource = c[1]
|
||||
}
|
||||
return &NameComponents{
|
||||
ClusterName: uid,
|
||||
Resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// NameBelongsToCluster checks if a given name is tagged with this cluster's UID.
|
||||
func (n *Namer) NameBelongsToCluster(name string) bool {
|
||||
if !strings.HasPrefix(name, "k8s-") {
|
||||
return false
|
||||
}
|
||||
parts := strings.Split(name, clusterNameDelimiter)
|
||||
clusterName := n.GetClusterName()
|
||||
if len(parts) == 1 {
|
||||
if clusterName == "" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
if len(parts) > 2 {
|
||||
return false
|
||||
}
|
||||
return parts[1] == clusterName
|
||||
}
|
||||
|
||||
// BeName constructs the name for a backend.
|
||||
func (n *Namer) BeName(port int64) string {
|
||||
return n.decorateName(fmt.Sprintf("%v-%d", backendPrefix, port))
|
||||
}
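// demoNamer is an illustrative sketch (not part of the original file) of the
// naming round trip; the UID "uid1" and the port are examples.
func demoNamer() {
	namer := NewNamer("uid1", "uid1")

	be := namer.BeName(30001)               // "k8s-be-30001--uid1"
	components := namer.ParseName(be)       // ClusterName: "uid1", Resource: "be"
	owned := namer.NameBelongsToCluster(be) // true: the name carries this cluster's UID

	glog.V(4).Infof("backend=%v components=%+v owned=%v", be, components, owned)
}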
|
||||
|
||||
// BePort retrieves the port from the given backend name.
|
||||
func (n *Namer) BePort(beName string) (string, error) {
|
||||
r, err := regexp.Compile(backendRegex)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
match := r.FindStringSubmatch(beName)
|
||||
if len(match) < 2 {
|
||||
return "", fmt.Errorf("unable to lookup port for %v", beName)
|
||||
}
|
||||
_, err = strconv.Atoi(match[1])
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unexpected regex match: %v", beName)
|
||||
}
|
||||
return match[1], nil
|
||||
}
|
||||
|
||||
// IGName constructs the name for an Instance Group.
|
||||
func (n *Namer) IGName() string {
|
||||
// Currently all ports are added to a single instance group.
|
||||
return n.decorateName(igPrefix)
|
||||
}
|
||||
|
||||
// FrSuffix constructs the glbc specific suffix for the FirewallRule.
|
||||
func (n *Namer) FrSuffix() string {
|
||||
firewallName := n.GetFirewallName()
|
||||
// The entire cluster only needs a single firewall rule.
|
||||
if firewallName == "" {
|
||||
return globalFirewallSuffix
|
||||
}
|
||||
return n.Truncate(fmt.Sprintf("%v%v%v", globalFirewallSuffix, clusterNameDelimiter, firewallName))
|
||||
}
|
||||
|
||||
// FrName constructs the full firewall rule name, this is the name assigned by
|
||||
// the cloudprovider lib + suffix from glbc, so we don't mix this rule with a
|
||||
// rule created for L4 loadbalancing.
|
||||
func (n *Namer) FrName(suffix string) string {
|
||||
return fmt.Sprintf("k8s-fw-%s", suffix)
|
||||
}
|
||||
|
||||
// LBName constructs a loadbalancer name from the given key. The key is usually
|
||||
// the namespace/name of a Kubernetes Ingress.
|
||||
func (n *Namer) LBName(key string) string {
|
||||
// TODO: Pipe the clusterName through, for now it saves code churn to just
|
||||
// grab it globally, especially since we haven't decided how to handle
|
||||
// namespace conflicts in the Ubernetes context.
|
||||
parts := strings.Split(key, clusterNameDelimiter)
|
||||
scrubbedName := strings.Replace(key, "/", "-", -1)
|
||||
clusterName := n.GetClusterName()
|
||||
if clusterName == "" || parts[len(parts)-1] == clusterName {
|
||||
return scrubbedName
|
||||
}
|
||||
return n.Truncate(fmt.Sprintf("%v%v%v", scrubbedName, clusterNameDelimiter, clusterName))
|
||||
}
|
||||
|
||||
// GCEURLMap is a nested map of hostname->path regex->backend
|
||||
type GCEURLMap map[string]map[string]*compute.BackendService
|
||||
|
||||
// GetDefaultBackend performs a destructive read and returns the default
|
||||
// backend of the urlmap.
|
||||
func (g GCEURLMap) GetDefaultBackend() *compute.BackendService {
|
||||
var d *compute.BackendService
|
||||
var exists bool
|
||||
if h, ok := g[DefaultBackendKey]; ok {
|
||||
if d, exists = h[DefaultBackendKey]; exists {
|
||||
delete(h, DefaultBackendKey)
|
||||
}
|
||||
delete(g, DefaultBackendKey)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// String implements the string interface for the GCEURLMap.
|
||||
func (g GCEURLMap) String() string {
|
||||
msg := ""
|
||||
for host, um := range g {
|
||||
msg += fmt.Sprintf("%v\n", host)
|
||||
for url, be := range um {
|
||||
msg += fmt.Sprintf("\t%v: ", url)
|
||||
if be == nil {
|
||||
msg += fmt.Sprintf("No backend\n")
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v\n", be.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// PutDefaultBackend performs a destructive write replacing the
|
||||
// default backend of the url map with the given backend.
|
||||
func (g GCEURLMap) PutDefaultBackend(d *compute.BackendService) {
|
||||
g[DefaultBackendKey] = map[string]*compute.BackendService{
|
||||
DefaultBackendKey: d,
|
||||
}
|
||||
}
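// demoGCEURLMap is an illustrative sketch (not part of the original file):
// building a urlmap for one host/path plus a default backend. The
// *compute.BackendService values are assumed to exist already.
func demoGCEURLMap(beFoo, beDefault *compute.BackendService) {
	urlMap := GCEURLMap{
		"foo.bar.com": {
			"/foo": beFoo,
		},
	}
	urlMap.PutDefaultBackend(beDefault)
	glog.V(3).Infof("urlmap:\n%v", urlMap)

	// GetDefaultBackend is a destructive read: it removes the entry again.
	def := urlMap.GetDefaultBackend()
	glog.V(3).Infof("default backend: %v", def.Name)
}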
|
||||
|
||||
// FakeGoogleAPINotFoundErr creates a NotFound error with type googleapi.Error
|
||||
func FakeGoogleAPINotFoundErr() *googleapi.Error {
|
||||
return &googleapi.Error{Code: 404}
|
||||
}
|
||||
|
||||
// IsHTTPErrorCode checks if the given error matches the given HTTP Error code.
|
||||
// For this to work the error must be a googleapi Error.
|
||||
func IsHTTPErrorCode(err error, code int) bool {
|
||||
apiErr, ok := err.(*googleapi.Error)
|
||||
return ok && apiErr.Code == code
|
||||
}
|
||||
|
||||
// IgnoreHTTPNotFound returns the passed err if it's not a GoogleAPI error
|
||||
// with a NotFound status code.
|
||||
func IgnoreHTTPNotFound(err error) error {
|
||||
if err != nil && IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
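// demoIgnoreNotFound is an illustrative sketch (not part of the original file):
// treating "already deleted" as success when tearing down a resource.
// deleteResource is a hypothetical helper that returns a *googleapi.Error on failure.
func demoIgnoreNotFound(name string, deleteResource func(string) error) error {
	if err := IgnoreHTTPNotFound(deleteResource(name)); err != nil {
		return fmt.Errorf("failed to clean up %v: %v", name, err)
	}
	return nil
}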
|
||||
|
||||
// IsInUsedByError returns true if the resource is being used by another GCP resource
|
||||
func IsInUsedByError(err error) bool {
|
||||
apiErr, ok := err.(*googleapi.Error)
|
||||
if !ok || apiErr.Code != http.StatusBadRequest {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(apiErr.Message, "being used by")
|
||||
}
|
||||
|
||||
// IsNotFoundError returns true if the resource does not exist
|
||||
func IsNotFoundError(err error) bool {
|
||||
return IsHTTPErrorCode(err, http.StatusNotFound)
|
||||
}
|
||||
|
||||
// CompareLinks returns true if the 2 self links are equal.
|
||||
func CompareLinks(l1, l2 string) bool {
|
||||
// TODO: These can be partial links
|
||||
return l1 == l2 && l1 != ""
|
||||
}
|
||||
|
||||
// FakeIngressRuleValueMap is a convenience type used by multiple submodules
|
||||
// that share the same testing methods.
|
||||
type FakeIngressRuleValueMap map[string]string
|
||||
|
||||
// GetNamedPort creates the NamedPort API object for the given port.
|
||||
func GetNamedPort(port int64) *compute.NamedPort {
|
||||
// TODO: move port naming to namer
|
||||
return &compute.NamedPort{Name: fmt.Sprintf("port%v", port), Port: port}
|
||||
}
|
2
controllers/nginx/.gitignore
vendored
2
controllers/nginx/.gitignore
vendored
|
@ -1,2 +0,0 @@
rootfs/nginx-ingress-controller
*/**/.coverprofile
|
|
@ -1,136 +0,0 @@
|
|||
all: push
|
||||
|
||||
BUILDTAGS=
|
||||
|
||||
# Use the 0.0 tag for testing, it shouldn't clobber any release builds
|
||||
TAG?=0.9.0-beta.15
|
||||
REGISTRY?=gcr.io/google_containers
|
||||
GOOS?=linux
|
||||
DOCKER?=gcloud docker --
|
||||
SED_I?=sed -i
|
||||
GOHOSTOS ?= $(shell go env GOHOSTOS)
|
||||
|
||||
ifeq ($(GOHOSTOS),darwin)
|
||||
SED_I=sed -i ''
|
||||
endif
|
||||
|
||||
REPO_INFO=$(shell git config --get remote.origin.url)
|
||||
|
||||
ifndef COMMIT
|
||||
COMMIT := git-$(shell git rev-parse --short HEAD)
|
||||
endif
|
||||
|
||||
PKG=k8s.io/ingress/controllers/nginx
|
||||
|
||||
ARCH ?= $(shell go env GOARCH)
|
||||
GOARCH = ${ARCH}
|
||||
DUMB_ARCH = ${ARCH}
|
||||
|
||||
ALL_ARCH = amd64 arm arm64 ppc64le
|
||||
|
||||
QEMUVERSION=v2.9.1
|
||||
|
||||
IMGNAME = nginx-ingress-controller
|
||||
IMAGE = $(REGISTRY)/$(IMGNAME)
|
||||
MULTI_ARCH_IMG = $(IMAGE)-$(ARCH)
|
||||
|
||||
# Set default base image dynamically for each arch
|
||||
BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.25
|
||||
|
||||
ifeq ($(ARCH),arm)
|
||||
QEMUARCH=arm
|
||||
GOARCH=arm
|
||||
DUMB_ARCH=armhf
|
||||
endif
|
||||
ifeq ($(ARCH),arm64)
|
||||
QEMUARCH=aarch64
|
||||
endif
|
||||
ifeq ($(ARCH),ppc64le)
|
||||
QEMUARCH=ppc64le
|
||||
GOARCH=ppc64le
|
||||
DUMB_ARCH=ppc64el
|
||||
endif
|
||||
#ifeq ($(ARCH),s390x)
|
||||
# QEMUARCH=s390x
|
||||
#endif
|
||||
|
||||
TEMP_DIR := $(shell mktemp -d)
|
||||
|
||||
DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile
|
||||
|
||||
all: all-container
|
||||
|
||||
sub-container-%:
|
||||
$(MAKE) ARCH=$* build container
|
||||
|
||||
sub-push-%:
|
||||
$(MAKE) ARCH=$* push
|
||||
|
||||
all-container: $(addprefix sub-container-,$(ALL_ARCH))
|
||||
|
||||
all-push: $(addprefix sub-push-,$(ALL_ARCH))
|
||||
|
||||
container: .container-$(ARCH)
|
||||
.container-$(ARCH):
|
||||
cp -r ./* $(TEMP_DIR)
|
||||
$(SED_I) 's|BASEIMAGE|$(BASEIMAGE)|g' $(DOCKERFILE)
|
||||
$(SED_I) "s|QEMUARCH|$(QEMUARCH)|g" $(DOCKERFILE)
|
||||
$(SED_I) "s|DUMB_ARCH|$(DUMB_ARCH)|g" $(DOCKERFILE)
|
||||
|
||||
ifeq ($(ARCH),amd64)
|
||||
# When building "normally" for amd64, remove the whole line, it has no part in the amd64 image
|
||||
$(SED_I) "/CROSS_BUILD_/d" $(DOCKERFILE)
|
||||
else
|
||||
# When cross-building, only the placeholder "CROSS_BUILD_" should be removed
|
||||
# Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel
|
||||
$(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset
|
||||
curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs
|
||||
$(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE)
|
||||
endif
|
||||
|
||||
$(DOCKER) build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)/rootfs
|
||||
|
||||
ifeq ($(ARCH), amd64)
|
||||
# This is to maintain backward compatibility
|
||||
$(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG)
|
||||
endif
|
||||
|
||||
push: .push-$(ARCH)
|
||||
.push-$(ARCH):
|
||||
$(DOCKER) push $(MULTI_ARCH_IMG):$(TAG)
|
||||
ifeq ($(ARCH), amd64)
|
||||
$(DOCKER) push $(IMAGE):$(TAG)
|
||||
endif
|
||||
|
||||
clean:
|
||||
$(DOCKER) rmi -f $(MULTI_ARCH_IMG):$(TAG) || true
|
||||
|
||||
build: clean
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -a -installsuffix cgo \
|
||||
-ldflags "-s -w -X ${PKG}/pkg/version.RELEASE=${TAG} -X ${PKG}/pkg/version.COMMIT=${COMMIT} -X ${PKG}/pkg/version.REPO=${REPO_INFO}" \
|
||||
-o ${TEMP_DIR}/rootfs/nginx-ingress-controller ${PKG}/pkg/cmd/controller
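The `-X` linker flags above overwrite variables in the controller's `pkg/version` package at build time. A minimal sketch of a package compatible with those flags (variable names are taken from the ldflags; the defaults are placeholders):

```go
package version

// These values are overwritten at build time via:
//   -ldflags "-X <pkg>/pkg/version.RELEASE=... -X <pkg>/pkg/version.COMMIT=... -X <pkg>/pkg/version.REPO=..."
var (
	RELEASE = "UNKNOWN"
	COMMIT  = "UNKNOWN"
	REPO    = "UNKNOWN"
)
```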
|
||||
|
||||
fmt:
|
||||
@echo "+ $@"
|
||||
@go list -f '{{if len .TestGoFiles}}"gofmt -s -l {{.Dir}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
|
||||
|
||||
lint:
|
||||
@echo "+ $@"
|
||||
@go list -f '{{if len .TestGoFiles}}"golint {{.Dir}}/..."{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
|
||||
|
||||
test: fmt lint vet
|
||||
@echo "+ $@"
|
||||
@go test -v -race -tags "$(BUILDTAGS) cgo" $(shell go list ${PKG}/... | grep -v vendor)
|
||||
|
||||
cover:
|
||||
@echo "+ $@"
|
||||
@go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
|
||||
gover
|
||||
goveralls -coverprofile=gover.coverprofile -service travis-ci -repotoken ${COVERALLS_TOKEN}
|
||||
|
||||
vet:
|
||||
@echo "+ $@"
|
||||
@go vet $(shell go list ${PKG}/... | grep -v vendor)
|
||||
|
||||
release: all-container all-push
|
||||
echo "done"
|
|
@ -1,553 +0,0 @@
|
|||
# Nginx Ingress Controller
|
||||
|
||||
This is an nginx Ingress controller that uses [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/#understanding-configmaps) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works.
|
||||
|
||||
## Contents
|
||||
* [Conventions](#conventions)
|
||||
* [Requirements](#requirements)
|
||||
* [Command line arguments](#command-line-arguments)
|
||||
* [Dry running](#try-running-the-ingress-controller)
|
||||
* [Deployment](#deployment)
|
||||
* [HTTP](#http)
|
||||
* [HTTPS](#https)
|
||||
* [Default SSL Certificate](#default-ssl-certificate)
|
||||
* [HTTPS enforcement](#server-side-https-enforcement)
|
||||
* [HSTS](#http-strict-transport-security)
|
||||
* [Kube-Lego](#automated-certificate-management-with-kube-lego)
|
||||
* [Source IP address](#source-ip-address)
|
||||
* [TCP Services](#exposing-tcp-services)
|
||||
* [UDP Services](#exposing-udp-services)
|
||||
* [Proxy Protocol](#proxy-protocol)
|
||||
* [Opentracing](#opentracing)
|
||||
* [NGINX customization](configuration.md)
|
||||
* [Custom errors](#custom-errors)
|
||||
* [NGINX status page](#nginx-status-page)
|
||||
* [Running multiple ingress controllers](#running-multiple-ingress-controllers)
|
||||
* [Running on Cloudproviders](#running-on-cloudproviders)
|
||||
* [Disabling NGINX ingress controller](#disabling-nginx-ingress-controller)
|
||||
* [Log format](#log-format)
|
||||
* [Local cluster](#local-cluster)
|
||||
* [Debug & Troubleshooting](#debug--troubleshooting)
|
||||
* [Limitations](#limitations)
|
||||
* [Why endpoints and not services?](#why-endpoints-and-not-services)
|
||||
* [NGINX Notes](#nginx-notes)
|
||||
|
||||
## Conventions
|
||||
|
||||
Anytime we reference a TLS secret, we mean a PEM-encoded X.509 certificate with a matching key (for example RSA 2048). You can generate such a certificate and key with:
|
||||
`openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${KEY_FILE} -out ${CERT_FILE} -subj "/CN=${HOST}/O=${HOST}"`
|
||||
and create the secret via `kubectl create secret tls ${CERT_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}`
|
||||
|
||||
|
||||
|
||||
## Requirements
|
||||
- Default backend [404-server](https://github.com/kubernetes/ingress/tree/master/images/404-server)
|
||||
|
||||
|
||||
## Command line arguments
|
||||
```
|
||||
Usage of :
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--apiserver-host string The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted.
|
||||
--configmap string Name of the ConfigMap that contains the custom configuration to use
|
||||
--default-backend-service string Service used to serve a 404 page for the default backend. Takes the form
|
||||
namespace/name. The controller uses the first node port of this Service for
|
||||
the default backend.
|
||||
--default-server-port int Default port to use for exposing the default server (catch all) (default 8181)
|
||||
--default-ssl-certificate string Name of the secret
|
||||
that contains a SSL certificate to be used as default for a HTTPS catch-all server
|
||||
--disable-node-list Disable querying nodes. If --force-namespace-isolation is true, this should also be set.
|
||||
--election-id string Election id to use for status update. (default "ingress-controller-leader")
|
||||
--enable-ssl-passthrough Enable SSL passthrough feature. Default is disabled
|
||||
--force-namespace-isolation Force namespace isolation. This flag is required to avoid the reference of secrets or
|
||||
configmaps located in a different namespace than the specified in the flag --watch-namespace.
|
||||
--health-check-path string Defines
|
||||
the URL to be used as health check inside in the default server in NGINX. (default "/healthz")
|
||||
--healthz-port int port for healthz endpoint. (default 10254)
|
||||
--http-port int Indicates the port to use for HTTP traffic (default 80)
|
||||
--https-port int Indicates the port to use for HTTPS traffic (default 443)
|
||||
--ingress-class string Name of the ingress class to route through this controller.
|
||||
--kubeconfig string Path to kubeconfig file with authorization and master location information.
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--profiling Enable profiling via web interface host:port/debug/pprof/ (default true)
|
||||
--publish-service string Service fronting the ingress controllers. Takes the form
|
||||
namespace/name. The controller will set the endpoint records on the
|
||||
ingress objects to reflect those on the service.
|
||||
      --sort-backends                    Defines if backends and its endpoints should be sorted
|
||||
      --ssl-passtrough-proxy-port int    Default port to use internally for SSL when SSL Passthrough is enabled (default 442)
|
||||
--status-port int Indicates the TCP port to use for exposing the nginx status page (default 18080)
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
--sync-period duration Relist and confirm cloud resources this often. Default is 10 minutes (default 10m0s)
|
||||
--tcp-services-configmap string Name of the ConfigMap that contains the definition of the TCP services to expose.
|
||||
The key in the map indicates the external port to be used. The value is the name of the
|
||||
service with the format namespace/serviceName and the port of the service could be a
|
||||
number or the name of the port.
|
||||
The ports 80 and 443 are not allowed as external ports. These ports are reserved for the backend
|
||||
--udp-services-configmap string Name of the ConfigMap that contains the definition of the UDP services to expose.
|
||||
The key in the map indicates the external port to be used. The value is the name of the
|
||||
service with the format namespace/serviceName and the port of the service could be a
|
||||
number or the name of the port.
|
||||
--update-status Indicates if the
|
||||
ingress controller should update the Ingress status IP/hostname. Default is true (default true)
|
||||
--update-status-on-shutdown Indicates if the
|
||||
ingress controller should update the Ingress status IP/hostname when the controller
|
||||
is being stopped. Default is true (default true)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
--watch-namespace string Namespace to watch for Ingress. Default is to watch all namespaces
|
||||
```
|
||||
|
||||
## Try running the Ingress controller
|
||||
|
||||
Before deploying the controller to production you might want to run it outside the cluster and observe it.
|
||||
|
||||
```console
|
||||
$ make build
|
||||
$ mkdir /etc/nginx-ssl
|
||||
$ ./rootfs/nginx-ingress-controller --running-in-cluster=false --default-backend-service=kube-system/default-http-backend
|
||||
```
|
||||
|
||||
## Deployment
|
||||
|
||||
First create a default backend and its corresponding service:
|
||||
```
|
||||
$ kubectl create -f examples/default-backend.yaml
|
||||
```
|
||||
|
||||
Follow the [example-deployment](../../examples/deployment/nginx/README.md) steps to deploy nginx-ingress-controller in a Kubernetes cluster (you may prefer another type of workload, like a DaemonSet, in a production environment).
|
||||
Loadbalancers are created via a ReplicationController or Daemonset:
|
||||
|
||||
|
||||
## HTTP
|
||||
|
||||
First we need to deploy some application to publish. To keep this simple we will use the [echoheaders app](https://github.com/kubernetes/contrib/blob/master/ingress/echoheaders/echo-app.yaml) that just returns information about the http request as output
|
||||
```
|
||||
kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.8 --replicas=1 --port=8080
|
||||
```
|
||||
|
||||
Now we expose the same application in two different services (so we can create different Ingress rules)
|
||||
```
|
||||
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
|
||||
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y
|
||||
```
|
||||
|
||||
Next we create a couple of Ingress rules
|
||||
```
|
||||
kubectl create -f examples/ingress.yaml
|
||||
```
|
||||
|
||||
We check that the Ingress rules are defined:
|
||||
```
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
echomap -
|
||||
foo.bar.com
|
||||
/foo echoheaders-x:80
|
||||
bar.baz.com
|
||||
/bar echoheaders-y:80
|
||||
/foo echoheaders-x:80
|
||||
```
|
||||
|
||||
Before deploying the Ingress controller we need a default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server)
|
||||
```
|
||||
kubectl create -f examples/default-backend.yaml
|
||||
kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend
|
||||
```
|
||||
|
||||
Check that NGINX is running with the defined Ingress rules:
|
||||
|
||||
```
|
||||
$ LBIP=$(kubectl get node `kubectl get po -l name=nginx-ingress-lb --template '{{range .items}}{{.spec.nodeName}}{{end}}'` --template '{{range $i, $n := .status.addresses}}{{if eq $n.type "ExternalIP"}}{{$n.address}}{{end}}{{end}}')
|
||||
$ curl $LBIP/foo -H 'Host: foo.bar.com'
|
||||
```
|
||||
|
||||
## HTTPS
|
||||
|
||||
You can secure an Ingress by specifying a secret that contains a TLS private key and certificate. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination. This controller supports SNI. The TLS secret must contain keys named tls.crt and tls.key that contain the certificate and private key to use for TLS, eg:
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
data:
|
||||
tls.crt: base64 encoded cert
|
||||
tls.key: base64 encoded key
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: foo-secret
|
||||
namespace: default
|
||||
type: kubernetes.io/tls
|
||||
```
|
||||
|
||||
Referencing this secret in an Ingress will tell the Ingress controller to secure the channel from the client to the loadbalancer using TLS:
|
||||
|
||||
```
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: no-rules-map
|
||||
spec:
|
||||
tls:
|
||||
secretName: foo-secret
|
||||
backend:
|
||||
serviceName: s1
|
||||
servicePort: 80
|
||||
```
|
||||
Please follow [PREREQUISITES](../../examples/PREREQUISITES.md) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different than the name of the certificate.
|
||||
|
||||
Check the [example](../../examples/tls-termination/nginx)
|
||||
|
||||
### Default SSL Certificate
|
||||
|
||||
NGINX provides the option [server name](http://nginx.org/en/docs/http/server_names.html) as a catch-all in case of requests that do not match one of the configured server names. This configuration works without issues for HTTP traffic. In case of HTTPS NGINX requires a certificate. For this reason the Ingress controller provides the flag `--default-ssl-certificate`. The secret behind this flag contains the default certificate to be used in the mentioned case.
|
||||
If this flag is not provided NGINX will use a self-signed certificate.
|
||||
|
||||
Running without the flag `--default-ssl-certificate`:
|
||||
|
||||
```
|
||||
$ curl -v https://10.2.78.7:443 -k
|
||||
* Rebuilt URL to: https://10.2.78.7:443/
|
||||
* Trying 10.2.78.4...
|
||||
* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
|
||||
* ALPN, offering http/1.1
|
||||
* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
|
||||
* successfully set certificate verify locations:
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
CApath: /etc/ssl/certs
|
||||
* TLSv1.2 (OUT), TLS header, Certificate Status (22):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.2 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
|
||||
* TLSv1.2 (IN), TLS handshake, Server finished (14):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
|
||||
* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (OUT), TLS handshake, Finished (20):
|
||||
* TLSv1.2 (IN), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
|
||||
* ALPN, server accepted to use http/1.1
|
||||
* Server certificate:
|
||||
* subject: CN=foo.bar.com
|
||||
* start date: Apr 13 00:50:56 2016 GMT
|
||||
* expire date: Apr 13 00:50:56 2017 GMT
|
||||
* issuer: CN=foo.bar.com
|
||||
* SSL certificate verify result: self signed certificate (18), continuing anyway.
|
||||
> GET / HTTP/1.1
|
||||
> Host: 10.2.78.7
|
||||
> User-Agent: curl/7.47.1
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 404 Not Found
|
||||
< Server: nginx/1.11.1
|
||||
< Date: Thu, 21 Jul 2016 15:38:46 GMT
|
||||
< Content-Type: text/html
|
||||
< Transfer-Encoding: chunked
|
||||
< Connection: keep-alive
|
||||
< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
|
||||
<
|
||||
<span>The page you're looking for could not be found.</span>
|
||||
|
||||
* Connection #0 to host 10.2.78.7 left intact
|
||||
```
|
||||
|
||||
Specifying `--default-ssl-certificate=default/foo-tls`:
|
||||
|
||||
```
|
||||
core@localhost ~ $ curl -v https://10.2.78.7:443 -k
|
||||
* Rebuilt URL to: https://10.2.78.7:443/
|
||||
* Trying 10.2.78.7...
|
||||
* Connected to 10.2.78.7 (10.2.78.7) port 443 (#0)
|
||||
* ALPN, offering http/1.1
|
||||
* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
|
||||
* successfully set certificate verify locations:
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
CApath: /etc/ssl/certs
|
||||
* TLSv1.2 (OUT), TLS header, Certificate Status (22):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.2 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.2 (IN), TLS handshake, Server key exchange (12):
|
||||
* TLSv1.2 (IN), TLS handshake, Server finished (14):
|
||||
* TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
|
||||
* TLSv1.2 (OUT), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (OUT), TLS handshake, Finished (20):
|
||||
* TLSv1.2 (IN), TLS change cipher, Client hello (1):
|
||||
* TLSv1.2 (IN), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256
|
||||
* ALPN, server accepted to use http/1.1
|
||||
* Server certificate:
|
||||
* subject: CN=foo.bar.com
|
||||
* start date: Apr 13 00:50:56 2016 GMT
|
||||
* expire date: Apr 13 00:50:56 2017 GMT
|
||||
* issuer: CN=foo.bar.com
|
||||
* SSL certificate verify result: self signed certificate (18), continuing anyway.
|
||||
> GET / HTTP/1.1
|
||||
> Host: 10.2.78.7
|
||||
> User-Agent: curl/7.47.1
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 404 Not Found
|
||||
< Server: nginx/1.11.1
|
||||
< Date: Mon, 18 Jul 2016 21:02:59 GMT
|
||||
< Content-Type: text/html
|
||||
< Transfer-Encoding: chunked
|
||||
< Connection: keep-alive
|
||||
< Strict-Transport-Security: max-age=15724800; includeSubDomains; preload
|
||||
<
|
||||
<span>The page you're looking for could not be found.</span>
|
||||
|
||||
* Connection #0 to host 10.2.78.7 left intact
|
||||
```
|
||||
|
||||
|
||||
### Server-side HTTPS enforcement
|
||||
|
||||
By default the controller redirects (301) to HTTPS if TLS is enabled for that Ingress. If you want to disable that behaviour globally, you can use `ssl-redirect: "false"` in the NGINX config map.
|
||||
|
||||
To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource.
|
||||
|
||||
|
||||
### HTTP Strict Transport Security
|
||||
|
||||
HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
|
||||
|
||||
By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule.
|
||||
|
||||
To disable this behavior use `hsts=false` in the NGINX config map.
|
||||
|
||||
|
||||
### Automated Certificate Management with Kube-Lego
|
||||
|
||||
[Kube-Lego] automatically requests missing or expired certificates from
|
||||
[Let's Encrypt] by monitoring ingress resources and their referenced secrets. To
|
||||
enable this for an ingress resource you have to add an annotation:
|
||||
|
||||
```
|
||||
kubectl annotate ing ingress-demo kubernetes.io/tls-acme="true"
|
||||
```
|
||||
|
||||
To setup Kube-Lego you can take a look at this [full example]. The first
|
||||
version to fully support Kube-Lego is nginx Ingress controller 0.8.
|
||||
|
||||
[full example]:https://github.com/jetstack/kube-lego/tree/master/examples
|
||||
[Kube-Lego]:https://github.com/jetstack/kube-lego
|
||||
[Let's Encrypt]:https://letsencrypt.org
|
||||
|
||||
## Source IP address
|
||||
|
||||
By default NGINX uses the content of the header `X-Forwarded-For` as the source of truth to get information about the client IP address. This works without issues in L7 **if we configure the setting `proxy-real-ip-cidr`** with the correct information of the IP/network address of the external load balancer.
|
||||
If the ingress controller is running in AWS we need to use the VPC IPv4 CIDR. This allows NGINX to avoid the spoofing of the header.
|
||||
Another option is to enable proxy protocol using `use-proxy-protocol: "true"`.
|
||||
In this mode NGINX does not use the content of the header to get the source IP address of the connection.
|
||||
|
||||
## Exposing TCP services
|
||||
|
||||
Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>:[PROXY]:[PROXY]`
|
||||
It is possible to use a number or the name of the port. The last two fields are optional. Adding `PROXY` in either or both of the last two fields enables Proxy Protocol decoding (listen) and/or encoding (proxy_pass) in a TCP service (https://www.nginx.com/resources/admin-guide/proxy-protocol/).
|
||||
|
||||
The next example shows how to expose the service `example-go` running in the namespace `default` in the port `8080` using the port `9000`
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: tcp-configmap-example
|
||||
data:
|
||||
9000: "default/example-go:8080"
|
||||
```
|
||||
|
||||
|
||||
Please check the [tcp services](../../examples/tcp/nginx/README.md) example
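To make the value format concrete, here is a small illustrative Go sketch of how an entry such as `default/example-go:8080:PROXY` can be split into its parts (a hypothetical helper, not the controller's actual parser):

```go
package main

import (
	"fmt"
	"strings"
)

// parseTCPEntry splits "<namespace/service>:<port>[:PROXY[:PROXY]]".
func parseTCPEntry(v string) (svc, port string, proxyIn, proxyOut bool, err error) {
	parts := strings.Split(v, ":")
	if len(parts) < 2 {
		return "", "", false, false, fmt.Errorf("invalid entry %q, expected <namespace/service>:<port>", v)
	}
	svc, port = parts[0], parts[1]
	proxyIn = len(parts) > 2 && parts[2] == "PROXY"  // decode proxy protocol on the listener
	proxyOut = len(parts) > 3 && parts[3] == "PROXY" // encode proxy protocol towards the backend
	return svc, port, proxyIn, proxyOut, nil
}

func main() {
	svc, port, pin, pout, _ := parseTCPEntry("default/example-go:8080:PROXY")
	fmt.Println(svc, port, pin, pout) // default/example-go 8080 true false
}
```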
|
||||
|
||||
## Exposing UDP services
|
||||
|
||||
Since 1.9.13 NGINX provides [UDP Load Balancing](https://www.nginx.com/blog/announcing-udp-load-balancing/).
|
||||
|
||||
Ingress does not support UDP services (yet). For this reason this Ingress controller uses the flag `--udp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>`
|
||||
It is possible to use a number or the name of the port.
|
||||
|
||||
The next example shows how to expose the service `kube-dns` running in the namespace `kube-system` in the port `53` using the port `53`
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: udp-configmap-example
|
||||
data:
|
||||
53: "kube-system/kube-dns:53"
|
||||
```
|
||||
|
||||
|
||||
Please check the [udp services](../../examples/udp/nginx/README.md) example
|
||||
|
||||
## Proxy Protocol
|
||||
|
||||
If you are using a L4 proxy to forward the traffic to the NGINX pods and terminate HTTP/HTTPS there, you will lose the remote endpoint's IP addresses. To prevent this you could use the [Proxy Protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) for forwarding traffic, this will send the connection details before forwarding the actual TCP connection itself.
|
||||
|
||||
Amongst others [ELBs in AWS](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-proxy-protocol.html) and [HAProxy](http://www.haproxy.org/) support Proxy Protocol.
|
||||
|
||||
Please check the [proxy-protocol](examples/proxy-protocol/) example
|
||||
|
||||
### Opentracing
|
||||
|
||||
Using the third party module [rnburn/nginx-opentracing](https://github.com/rnburn/nginx-opentracing) the NGINX ingress controller can configure NGINX to enable [OpenTracing](http://opentracing.io) instrumentation.
|
||||
By default this feature is disabled.
|
||||
|
||||
To enable the instrumentation we just need to enable it in the configuration ConfigMap and set the host where we should send the traces.
|
||||
|
||||
The [aledbf/zipkin-js-example](https://github.com/aledbf/zipkin-js-example) github repository contains a dockerized version of zipkin-js-example with the required Kubernetes descriptors.
|
||||
To install the example and the zipkin collector we just need to run:
```
$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/zipkin.yaml
$ kubectl create -f https://raw.githubusercontent.com/aledbf/zipkin-js-example/kubernetes/kubernetes/deployment.yaml
```
We also need to configure the NGINX controller ConfigMap with the required values:
```
apiVersion: v1
data:
  enable-opentracing: "true"
  zipkin-collector-host: zipkin.default.svc.cluster.local
kind: ConfigMap
metadata:
  labels:
    k8s-app: nginx-ingress-controller
  name: nginx-custom-configuration
```
Using curl we can generate some traces:

```
$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example'
$ curl -v http://$(minikube ip)/api -H 'Host: zipkin-js-example'
```
In the Zipkin interface we can see the details:

![zipkin screenshot](docs/images/zipkin-demo.png "zipkin collector screenshot")

### Custom errors

In case of an error in a request, the body of the response is obtained from the `default backend`.
Each request to the default backend includes two headers:

- `X-Code` indicates the HTTP status code to be returned to the client.
- `X-Format` contains the value of the `Accept` header.

**Important:** the custom backend must itself return the correct HTTP status code. NGINX does not change the response from the custom default backend.

Using these two headers it is possible to use a custom backend service like [this one](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-errors/nginx) that inspects each request and returns a custom error page in the format expected by the client. Please check the example [custom-errors](examples/customization/custom-errors/nginx/README.md).
NGINX sends additional headers that can be used to build a custom response:

- X-Original-URI
- X-Namespace
- X-Ingress-Name
- X-Service-Name

### NGINX status page

The ngx_http_stub_status_module module provides access to basic status information. This is the default module, active at the URL `/nginx_status`.
This controller provides an alternative to this module using the [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) third party module.
To use this module just provide a ConfigMap with the key `enable-vts-status=true`. The URL is exposed on port 18080.
Please check the example `example/rc-default.yaml`.

![nginx module vts](docs/images/nginx-module-vts.png "screenshot with basic stats")

To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`
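A minimal sketch of that ConfigMap change, again assuming a controller ConfigMap named `nginx-custom-configuration`:

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-custom-configuration
  labels:
    k8s-app: nginx-ingress-controller
data:
  enable-vts-status: "true"
```
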
### Running multiple ingress controllers

If you're running multiple ingress controllers, or running on a cloud provider that natively handles
ingress, you need to specify the annotation `kubernetes.io/ingress.class: "nginx"` in all ingresses
that you would like this controller to claim. Not specifying the annotation will lead to multiple
ingress controllers claiming the same ingress. Specifying the wrong value will result in all ingress
controllers ignoring the ingress. Running multiple ingress controllers in the same cluster was not
supported in Kubernetes versions < 1.3.

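For illustration, a minimal sketch of an Ingress claimed by this controller via the class annotation (the Ingress name here is hypothetical; `http-svc` is the test HTTP service used by the examples in this repository):

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-claimed-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  backend:
    serviceName: http-svc
    servicePort: 80
```
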
### Running on Cloudproviders

If you're running this ingress controller on a cloud provider, you should assume the provider also has a native
Ingress controller and specify the `ingress.class` annotation as indicated in this section.
In addition to this, you will need to add a firewall rule for each port this controller is listening on, i.e. :80 and :443.

### Disabling NGINX ingress controller

Setting the annotation `kubernetes.io/ingress.class` to any value other than "nginx" or the empty string will force the NGINX Ingress controller to ignore your Ingress. Do this if you wish to use one of the other Ingress controllers at the same time as the NGINX controller.

### Log format

The default configuration uses a custom logging format to add additional information about upstreams:

```
log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
    '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" '
    '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status';
```
Sources:
- [upstream variables](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables)
- [embedded variables](http://nginx.org/en/docs/http/ngx_http_core_module.html#variables)

Description:
- `$proxy_protocol_addr`: the client address, if PROXY protocol is enabled
- `$remote_addr`: the client address, if PROXY protocol is disabled (default)
- `$proxy_add_x_forwarded_for`: the `X-Forwarded-For` client request header field with the `$remote_addr` variable appended to it, separated by a comma
- `$remote_user`: user name supplied with Basic authentication
- `$time_local`: local time in the Common Log Format
- `$request`: full original request line
- `$status`: response status
- `$body_bytes_sent`: number of bytes sent to a client, not counting the response header
- `$http_referer`: value of the Referer header
- `$http_user_agent`: value of the User-Agent header
- `$request_length`: request length (including request line, header, and request body)
- `$request_time`: time elapsed since the first bytes were read from the client
- `$proxy_upstream_name`: name of the upstream. The format is `upstream-<namespace>-<service name>-<service port>`
- `$upstream_addr`: keeps the IP address and port, or the path to the UNIX-domain socket of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas
- `$upstream_response_length`: keeps the length of the response obtained from the upstream server
- `$upstream_response_time`: keeps time spent on receiving the response from the upstream server; the time is kept in seconds with millisecond resolution
- `$upstream_status`: keeps the status code of the response obtained from the upstream server

### Local cluster

Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) it is possible to start a local Kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md) for more details.

Use of `hostNetwork: true` in the ingress controller is required so that it falls back to localhost:8080 for the apiserver if every other client creation check fails (e.g. service account not present, kubeconfig doesn't exist, no master env vars...).

### Debug & Troubleshooting

Using the flag `--v=XX` it is possible to increase the level of logging.
In particular:

- `--v=2` shows details using `diff` about the changes in the configuration in nginx
```
I0316 12:24:37.581267       1 utils.go:148] NGINX configuration diff a//etc/nginx/nginx.conf b//etc/nginx/nginx.conf
I0316 12:24:37.581356       1 utils.go:149] --- /tmp/922554809  2016-03-16 12:24:37.000000000 +0000
+++ /tmp/079811012  2016-03-16 12:24:37.000000000 +0000
@@ -235,7 +235,6 @@

     upstream default-echoheadersx {
         least_conn;
-        server 10.2.112.124:5000;
         server 10.2.208.50:5000;

     }
I0316 12:24:37.610073       1 command.go:69] change in configuration detected. Reloading...
```
- `--v=3` shows details about the service, Ingress rule and endpoint changes, and dumps the nginx configuration in JSON format
- `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html)

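The verbosity level is just another argument on the controller container. A hypothetical sketch of the relevant fragment of a Deployment (the names and image are placeholders, not the shipped manifest):

```
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
spec:
  template:
    metadata:
      labels:
        app: nginx-ingress-controller
    spec:
      containers:
      - name: nginx-ingress-controller
        image: example/nginx-ingress-controller:dev   # placeholder image
        args:
        - /nginx-ingress-controller                   # assumed entrypoint binary
        - --default-backend-service=default/default-http-backend
        - --v=2                                       # increase logging verbosity
```
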
### Limitations

- Ingress rules for TLS require the definition of the field `host` (see the sketch below)

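A hedged sketch of a TLS Ingress that satisfies this limitation by declaring `host` (the host name, secret, and service reuse names from the examples in this repository and are assumptions here):

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tls-with-host
spec:
  tls:
  - hosts:
    - foo.bar.com
    secretName: tls-secret
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /
        backend:
          serviceName: http-svc
          servicePort: 80
```
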
### Why endpoints and not services

The NGINX ingress controller does not use [Services](http://kubernetes.io/docs/user-guide/services) to route traffic to the pods. Instead it uses the Endpoints API in order to bypass [kube-proxy](http://kubernetes.io/docs/admin/kube-proxy/) and allow NGINX features like session affinity and custom load balancing algorithms. It also removes some overhead, such as conntrack entries for iptables DNAT.

### NGINX notes

Since `gcr.io/google_containers/nginx-slim:0.8` NGINX contains the following patches:
- Dynamic TLS record size [nginx__dynamic_tls_records.patch](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/)
  NGINX provides the parameter `ssl_buffer_size` to adjust the size of the buffer. The default value in NGINX is 16KB. The ingress controller changes the default to 4KB. This improves the [TLS Time To First Byte (TTTFB)](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/), but the size is fixed. This patch adapts the size of the buffer to the content being served, helping to improve the perceived latency.

@ -1,45 +0,0 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strings
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testDatas = []struct {
|
||||
a string
|
||||
slice []string
|
||||
er bool
|
||||
}{
|
||||
{"first", []string{"first", "second"}, true},
|
||||
{"FIRST", []string{"first", "second"}, false},
|
||||
{"third", []string{"first", "second"}, false},
|
||||
{"first", nil, false},
|
||||
|
||||
{"", []string{"first", "second"}, false},
|
||||
{"", []string{"first", "second", ""}, true},
|
||||
{"", nil, false},
|
||||
}
|
||||
|
||||
func TestStringInSlice(t *testing.T) {
|
||||
for _, testData := range testDatas {
|
||||
r := StringInSlice(testData.a, testData.slice)
|
||||
if r != testData.er {
|
||||
t.Errorf("getted result is '%t', but expected is '%t'", r, testData.er)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
# Copyright 2017 The Kubernetes Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# TODO(ingress#191): Change this to something more appropriate, like busybox
|
||||
From ubuntu:15.10
|
||||
MAINTAINER Prashanth B <beeps@google.com>
|
||||
RUN apt-get update && apt-get install ssl-cert -y
|
||||
COPY server /
|
||||
ENTRYPOINT ["/server"]
|
|
@ -1,39 +0,0 @@
|
|||
# Copyright 2017 The Kubernetes Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Build the default backend binary or image for amd64, arm, arm64 and ppc64le
|
||||
#
|
||||
# Usage:
|
||||
# [PREFIX=gcr.io/google_containers/dummy-ingress-controller] [ARCH=amd64] [TAG=1.1] make (server|container|push)
|
||||
|
||||
all: push
|
||||
|
||||
TAG=0.1
|
||||
PREFIX?=bprashanth/dummy-ingress-controller
|
||||
ARCH?=amd64
|
||||
GOLANG_VERSION=1.6
|
||||
TEMP_DIR:=$(shell mktemp -d)
|
||||
|
||||
server: server.go
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) GOARM=6 godep go build -a -installsuffix cgo -ldflags '-w' -o server ./server.go
|
||||
|
||||
container: server
|
||||
docker build --pull -t $(PREFIX)-$(ARCH):$(TAG) .
|
||||
|
||||
push: container
|
||||
gcloud docker -- push $(PREFIX)-$(ARCH):$(TAG)
|
||||
|
||||
clean:
|
||||
rm -f server
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
# Dummy controller
|
||||
|
||||
This example contains the source code of a simple dummy controller. If you want
|
||||
more details on the interface, or what the generic controller is actually doing,
|
||||
please read [this doc](/docs/dev/getting-started.md). You can deploy the controller as
|
||||
follows:
|
||||
|
||||
```console
|
||||
$ kubectl create -f deployment.yaml
|
||||
service "default-backend" created
|
||||
deployment "dummy-ingress-controller" created
|
||||
|
||||
$ kubectl get po
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
dummy-ingress-controller-3685541482-082nl 1/1 Running 0 10m
|
||||
|
||||
$ kubectl logs dummy-ingress-controller-3685541482-082nl
|
||||
I0131 02:29:02.462123 1 launch.go:92] &{dummy 0.0.0 git-00000000 git://foo.bar.com}
|
||||
I0131 02:29:02.462513 1 launch.go:221] Creating API server client for https://10.0.0.1:443
|
||||
I0131 02:29:02.494571 1 launch.go:111] validated default/default-backend as the default backend
|
||||
I0131 02:29:02.503180 1 controller.go:1038] starting Ingress controller
|
||||
I0131 02:29:02.513528 1 leaderelection.go:247] lock is held by dummy-ingress-controller-3685541482-50jh0 and has not yet expired
|
||||
W0131 02:29:03.510699 1 queue.go:87] requeuing kube-system/kube-scheduler, err deferring sync till endpoints controller has synced
|
||||
W0131 02:29:03.514445 1 queue.go:87] requeuing kube-system/node-controller-token-826dl, err deferring sync till endpoints controller has synced
|
||||
2017/01/31 02:29:12 Received OnUpdate notification
|
||||
2017/01/31 02:29:12 upstream-default-backend: 10.180.1.20
|
||||
```
|
||||
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: default-backend
|
||||
namespace: default
|
||||
labels:
|
||||
name: default-backend
|
||||
app: dummy-ingress-controller
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 10254
|
||||
selector:
|
||||
# Point back to the dummy controller's
|
||||
# healthz port
|
||||
app: dummy-ingress-controller
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: dummy-ingress-controller
|
||||
namespace: default
|
||||
labels:
|
||||
app: dummy-ingress-controller
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: dummy-ingress-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: dummy-ingress-controller
|
||||
spec:
|
||||
containers:
|
||||
- name: server
|
||||
image: bprashanth/dummy-ingress-controller-amd64:0.1
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 10254
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
args:
|
||||
- /server
|
||||
- --default-backend-service=$(POD_NAMESPACE)/default-backend
|
|
@ -1,121 +0,0 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
|
||||
nginxconfig "k8s.io/ingress/controllers/nginx/pkg/config"
|
||||
"k8s.io/ingress/core/pkg/ingress"
|
||||
"k8s.io/ingress/core/pkg/ingress/controller"
|
||||
"k8s.io/ingress/core/pkg/ingress/defaults"
|
||||
)
|
||||
|
||||
func main() {
|
||||
dc := newDummyController()
|
||||
ic := controller.NewIngressController(dc)
|
||||
defer func() {
|
||||
log.Printf("Shutting down ingress controller...")
|
||||
ic.Stop()
|
||||
}()
|
||||
ic.Start()
|
||||
}
|
||||
|
||||
func newDummyController() ingress.Controller {
|
||||
return &DummyController{}
|
||||
}
|
||||
|
||||
type DummyController struct{}
|
||||
|
||||
func (dc DummyController) SetConfig(cfgMap *api.ConfigMap) {
|
||||
log.Printf("Config map %+v", cfgMap)
|
||||
}
|
||||
|
||||
func (dc DummyController) Test(file string) *exec.Cmd {
|
||||
return exec.Command("echo", file)
|
||||
}
|
||||
|
||||
func (dc DummyController) OnUpdate(updatePayload ingress.Configuration) error {
|
||||
log.Printf("Received OnUpdate notification")
|
||||
for _, b := range updatePayload.Backends {
|
||||
eps := []string{}
|
||||
for _, e := range b.Endpoints {
|
||||
eps = append(eps, e.Address)
|
||||
}
|
||||
log.Printf("%v: %v", b.Name, strings.Join(eps, ", "))
|
||||
}
|
||||
|
||||
log.Printf("Reloaded new config")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dc DummyController) BackendDefaults() defaults.Backend {
|
||||
// Just adopt nginx's default backend config
|
||||
return nginxconfig.NewDefault().Backend
|
||||
}
|
||||
|
||||
func (n DummyController) Name() string {
|
||||
return "dummy Controller"
|
||||
}
|
||||
|
||||
func (n DummyController) Check(_ *http.Request) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dc DummyController) Info() *ingress.BackendInfo {
|
||||
return &ingress.BackendInfo{
|
||||
Name: "dummy",
|
||||
Release: "0.0.0",
|
||||
Build: "git-00000000",
|
||||
Repository: "git://foo.bar.com",
|
||||
}
|
||||
}
|
||||
|
||||
func (n DummyController) ConfigureFlags(*pflag.FlagSet) {
|
||||
}
|
||||
|
||||
func (n DummyController) OverrideFlags(*pflag.FlagSet) {
|
||||
}
|
||||
|
||||
func (n DummyController) SetListers(lister *ingress.StoreLister) {
|
||||
|
||||
}
|
||||
|
||||
func (n DummyController) DefaultIngressClass() string {
|
||||
return "dummy"
|
||||
}
|
||||
|
||||
func (n DummyController) UpdateIngressStatus(*extensions.Ingress) []api.LoadBalancerIngress {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DefaultEndpoint returns the default endpoint to be use as default server that returns 404.
|
||||
func (n DummyController) DefaultEndpoint() ingress.Endpoint {
|
||||
return ingress.Endpoint{
|
||||
Address: "127.0.0.1",
|
||||
Port: "8181",
|
||||
Target: &api.ObjectReference{},
|
||||
}
|
||||
}
|
|
@ -1,68 +0,0 @@
|
|||
# Deploying the GCE Ingress controller
|
||||
|
||||
This example demonstrates the deployment of a GCE Ingress controller.
|
||||
|
||||
Note: __all GCE/GKE clusters already have an Ingress controller running
|
||||
on the master. The only reason to deploy another GCE controller is if you want
|
||||
to debug or otherwise observe its operation (eg via kubectl logs).__
|
||||
|
||||
__Before deploying another one in your cluster, make sure you disable the master controller.__
|
||||
|
||||
## Disabling the master controller
|
||||
|
||||
See the hard disable options [here](/docs/faq/gce.md#how-do-i-disable-the-gce-ingress-controller).
|
||||
|
||||
## Deploying a new controller
|
||||
|
||||
The following command deploys a GCE Ingress controller in your cluster:
|
||||
|
||||
```console
|
||||
$ kubectl create -f gce-ingress-controller.yaml
|
||||
service "default-http-backend" created
|
||||
replicationcontroller "l7-lb-controller" created
|
||||
|
||||
$ kubectl get po -l name=glbc
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
l7-lb-controller-1s22c 2/2 Running 0 27s
|
||||
```
|
||||
|
||||
Now you can create an Ingress and observe the controller:
|
||||
|
||||
```console
|
||||
$ kubectl create -f gce-tls-ingress.yaml
|
||||
ingress "test" created
|
||||
|
||||
$ kubectl logs l7-lb-controller-1s22c -c l7-lb-controller
|
||||
I0201 01:03:17.387548 1 main.go:179] Starting GLBC image: glbc:0.9.2, cluster name
|
||||
I0201 01:03:18.459740 1 main.go:291] Using saved cluster uid "32658fa96c080068"
|
||||
I0201 01:03:18.459771 1 utils.go:122] Changing cluster name from to 32658fa96c080068
|
||||
I0201 01:03:18.461652 1 gce.go:331] Using existing Token Source &oauth2.reuseTokenSource{new:google.computeSource{account:""}, mu:sync.Mutex{state:0, sema:0x0}, t:(*oauth2.Token)(nil)}
|
||||
I0201 01:03:18.553142 1 cluster_manager.go:264] Created GCE client without a config file
|
||||
I0201 01:03:18.553773 1 controller.go:234] Starting loadbalancer controller
|
||||
I0201 01:04:58.314271 1 event.go:217] Event(api.ObjectReference{Kind:"Ingress", Namespace:"default", Name:"test", UID:"73549716-e81a-11e6-a8c5-42010af00002", APIVersion:"extensions", ResourceVersion:"673016", FieldPath:""}): type: 'Normal' reason: 'ADD' default/test
|
||||
I0201 01:04:58.413616 1 instances.go:76] Creating instance group k8s-ig--32658fa96c080068 in zone us-central1-b
|
||||
I0201 01:05:01.998169 1 gce.go:2084] Adding port 30301 to instance group k8s-ig--32658fa96c080068 with 0 ports
|
||||
I0201 01:05:02.444014 1 backends.go:149] Creating backend for 1 instance groups, port 30301 named port &{port30301 30301 []}
|
||||
I0201 01:05:02.444175 1 utils.go:495] No pod in service http-svc with node port 30301 has declared a matching readiness probe for health checks.
|
||||
I0201 01:05:02.555599 1 healthchecks.go:62] Creating health check k8s-be-30301--32658fa96c080068
|
||||
I0201 01:05:11.300165 1 gce.go:2084] Adding port 31938 to instance group k8s-ig--32658fa96c080068 with 1 ports
|
||||
I0201 01:05:11.743914 1 backends.go:149] Creating backend for 1 instance groups, port 31938 named port &{port31938 31938 []}
|
||||
I0201 01:05:11.744008 1 utils.go:495] No pod in service default-http-backend with node port 31938 has declared a matching readiness probe for health checks.
|
||||
I0201 01:05:11.811972 1 healthchecks.go:62] Creating health check k8s-be-31938--32658fa96c080068
|
||||
I0201 01:05:19.871791 1 loadbalancers.go:121] Creating l7 default-test--32658fa96c080068
|
||||
...
|
||||
|
||||
$ kubectl get ing test
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
test * 35.186.208.106 80, 443 4m
|
||||
|
||||
$ curl 35.186.208.106 -kL
|
||||
CLIENT VALUES:
|
||||
client_address=10.180.3.1
|
||||
command=GET
|
||||
real path=/
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://35.186.208.106:8080/
|
||||
...
|
||||
```
|
|
@ -1,82 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
# This must match the --default-backend-service argument of the l7 lb
|
||||
# controller and is required because GCE mandates a default backend.
|
||||
name: default-http-backend
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
spec:
|
||||
# The default backend must be of type NodePort.
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
k8s-app: glbc
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: l7-lb-controller
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.9.0
|
||||
spec:
|
||||
# There should never be more than 1 controller alive simultaneously.
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: glbc
|
||||
version: v0.9.0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.9.0
|
||||
name: glbc
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 600
|
||||
containers:
|
||||
- name: default-http-backend
|
||||
# Any image is permissible as long as:
|
||||
# 1. It serves a 404 page at /
|
||||
# 2. It serves 200 on a /healthz endpoint
|
||||
image: gcr.io/google_containers/defaultbackend:1.0
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- image: gcr.io/google_containers/glbc:0.9.2
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
name: l7-lb-controller
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
args:
|
||||
- --default-backend-service=default/default-http-backend
|
||||
- --sync-period=300s
|
|
@ -1,15 +0,0 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: test
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "gce"
|
||||
spec:
|
||||
tls:
|
||||
# This assumes tls-secret exists.
|
||||
- secretName: tls-secret
|
||||
backend:
|
||||
# This assumes http-svc exists and routes to healthy endpoints.
|
||||
serviceName: http-svc
|
||||
servicePort: 80
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
# Simple HTTP health check example
|
||||
|
||||
The GCE Ingress controller adopts the readiness probe from the matching endpoints, provided the readiness probe doesn't require HTTPS or special headers.
|
||||
|
||||
Create the following app:
|
||||
```console
|
||||
$ kubectl create -f health_check_app.yaml
|
||||
replicationcontroller "echoheaders" created
|
||||
You have exposed your service on an external port on all nodes in your
|
||||
cluster. If you want to expose this service to the external internet, you may
|
||||
need to set up firewall rules for the service port(s) (tcp:31165) to serve traffic.
|
||||
|
||||
See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
|
||||
service "echoheadersx" created
|
||||
You have exposed your service on an external port on all nodes in your
|
||||
cluster. If you want to expose this service to the external internet, you may
|
||||
need to set up firewall rules for the service port(s) (tcp:31020) to serve traffic.
|
||||
|
||||
See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
|
||||
service "echoheadersy" created
|
||||
ingress "echomap" created
|
||||
```
|
||||
|
||||
You should soon find an Ingress that is backed by a GCE Loadbalancer.
|
||||
|
||||
```console
|
||||
$ kubectl describe ing echomap
|
||||
Name: echomap
|
||||
Namespace: default
|
||||
Address: 107.178.255.228
|
||||
Default backend: default-http-backend:80 (10.180.0.9:8080,10.240.0.2:8080)
|
||||
Rules:
|
||||
Host Path Backends
|
||||
---- ---- --------
|
||||
foo.bar.com
|
||||
/foo echoheadersx:80 (<none>)
|
||||
bar.baz.com
|
||||
/bar echoheadersy:80 (<none>)
|
||||
/foo echoheadersx:80 (<none>)
|
||||
Annotations:
|
||||
target-proxy: k8s-tp-default-echomap--a9d60e8176d933ee
|
||||
url-map: k8s-um-default-echomap--a9d60e8176d933ee
|
||||
backends: {"k8s-be-31020--a9d60e8176d933ee":"HEALTHY","k8s-be-31165--a9d60e8176d933ee":"HEALTHY","k8s-be-31686--a9d60e8176d933ee":"HEALTHY"}
|
||||
forwarding-rule: k8s-fw-default-echomap--a9d60e8176d933ee
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
17m 17m 1 {loadbalancer-controller } Normal ADD default/echomap
|
||||
15m 15m 1 {loadbalancer-controller } Normal CREATE ip: 107.178.255.228
|
||||
|
||||
$ curl 107.178.255.228/foo -H 'Host:foo.bar.com'
|
||||
CLIENT VALUES:
|
||||
client_address=10.240.0.5
|
||||
command=GET
|
||||
real path=/foo
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://foo.bar.com:8080/foo
|
||||
...
|
||||
```
|
||||
|
||||
You can confirm the health check endpoint it's using in one of two ways:
|
||||
* Through the cloud console: compute > health checks > lookup your health check. It takes the form k8s-be-nodePort-hash, where nodePort in the example above is 31165 and 31020, as shown by the kubectl output.
|
||||
* Through gcloud: Run `gcloud compute http-health-checks list`
|
||||
|
||||
## Limitations
|
||||
|
||||
A few points to note:
|
||||
* The readiness probe must be exposed on the port matching the `servicePort` specified in the Ingress
|
||||
* The readiness probe cannot have special requirements like headers
|
||||
* The probe timeouts are translated to GCE health check timeouts
|
||||
* You must create the pods backing the endpoints with the given readiness probe. This *will not* work if you update the replication controller with a different readiness probe.
|
|
@ -1,100 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: echoheaders
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
containers:
|
||||
- name: echoheaders
|
||||
image: gcr.io/google_containers/echoserver:1.8
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
periodSeconds: 1
|
||||
timeoutSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 10
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersx
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersy
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: echomap
|
||||
spec:
|
||||
rules:
|
||||
- host: foo.bar.com
|
||||
http:
|
||||
paths:
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
- host: bar.baz.com
|
||||
http:
|
||||
paths:
|
||||
- path: /bar
|
||||
backend:
|
||||
serviceName: echoheadersy
|
||||
servicePort: 80
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
|
|
@ -1,129 +0,0 @@
|
|||
# Static IPs
|
||||
|
||||
This example demonstrates how to assign a [static-ip](https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) to an Ingress on GCE.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example.
|
||||
You will also need to make sure your Ingress targets exactly one Ingress
|
||||
controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class),
|
||||
and that you have an ingress controller [running](/examples/deployment) in your cluster.
|
||||
|
||||
## Acquiring a static IP
|
||||
|
||||
In GCE, static IP belongs to a given project until the owner decides to release
|
||||
it. If you create a static IP and assign it to an Ingress, deleting the Ingress
|
||||
or tearing down the GKE cluster *will not* delete the static IP. You can check
|
||||
the static IPs you have as follows
|
||||
|
||||
```console
|
||||
$ gcloud compute addresses list --global
|
||||
NAME REGION ADDRESS STATUS
|
||||
test-ip 35.186.221.137 RESERVED
|
||||
|
||||
$ gcloud compute addresses list
|
||||
NAME REGION ADDRESS STATUS
|
||||
test-ip 35.186.221.137 RESERVED
|
||||
test-ip us-central1 35.184.21.228 RESERVED
|
||||
```
|
||||
|
||||
Note the difference between a regional and a global static ip. Only global
|
||||
static-ips will work with Ingress. If you don't already have an IP, you can
|
||||
create it
|
||||
|
||||
```console
|
||||
$ gcloud compute addresses create test-ip --global
|
||||
Created [https://www.googleapis.com/compute/v1/projects/kubernetesdev/global/addresses/test-ip].
|
||||
---
|
||||
address: 35.186.221.137
|
||||
creationTimestamp: '2017-01-31T10:32:29.889-08:00'
|
||||
description: ''
|
||||
id: '9221457935391876818'
|
||||
kind: compute#address
|
||||
name: test-ip
|
||||
selfLink: https://www.googleapis.com/compute/v1/projects/kubernetesdev/global/addresses/test-ip
|
||||
status: RESERVED
|
||||
```
|
||||
|
||||
## Assigning a static IP to an Ingress
|
||||
|
||||
You can now add the static IP from the previous step to an Ingress,
|
||||
by specifying the `kubernetes.io/global-static-ip-name` annotation,
|
||||
the example yaml in this directory already has it set to `test-ip`
|
||||
|
||||
```console
|
||||
$ kubectl create -f gce-static-ip-ingress.yaml
|
||||
ingress "static-ip" created
|
||||
|
||||
$ gcloud compute addresses list test-ip
|
||||
NAME REGION ADDRESS STATUS
|
||||
test-ip 35.186.221.137 IN_USE
|
||||
test-ip us-central1 35.184.21.228 RESERVED
|
||||
|
||||
$ kubectl get ing
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
static-ip * 35.186.221.137 80, 443 1m
|
||||
|
||||
$ curl 35.186.221.137 -Lk
|
||||
CLIENT VALUES:
|
||||
client_address=10.180.1.1
|
||||
command=GET
|
||||
real path=/
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://35.186.221.137:8080/
|
||||
...
|
||||
```
|
||||
|
||||
## Retaining the static IP
|
||||
|
||||
You can test retention by deleting the Ingress
|
||||
|
||||
```console
|
||||
$ kubectl delete -f gce-static-ip-ingress.yaml
|
||||
ingress "static-ip" deleted
|
||||
|
||||
$ kubectl get ing
|
||||
No resources found.
|
||||
|
||||
$ gcloud compute addresses list test-ip --global
|
||||
NAME REGION ADDRESS STATUS
|
||||
test-ip 35.186.221.137 RESERVED
|
||||
```
|
||||
|
||||
## Promote ephemeral to static IP
|
||||
|
||||
If you simply create a HTTP Ingress resource, it gets an ephemeral IP
|
||||
|
||||
```console
|
||||
$ kubectl create -f gce-http-ingress.yaml
|
||||
ingress "http-ingress" created
|
||||
|
||||
$ kubectl get ing
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
http-ingress * 35.186.195.33 80 1h
|
||||
|
||||
$ gcloud compute forwarding-rules list
|
||||
NAME REGION IP_ADDRESS IP_PROTOCOL TARGET
|
||||
k8s-fw-default-http-ingress--32658fa96c080068 35.186.195.33 TCP k8s-tp-default-http-ingress--32658fa96c080068
|
||||
```
|
||||
|
||||
Note that because this is an ephemeral IP, it won't show up in the output of
|
||||
`gcloud compute addresses list`.
|
||||
|
||||
If you either directly create an Ingress with a TLS section, or modify a HTTP
|
||||
Ingress to have a TLS section, it gets a static IP.
|
||||
|
||||
```console
|
||||
$ kubectl patch ing http-ingress -p '{"spec":{"tls":[{"secretName":"tls-secret"}]}}'
|
||||
"http-ingress" patched
|
||||
|
||||
$ kubectl get ing
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
http-ingress * 35.186.195.33 80, 443 1h
|
||||
|
||||
$ gcloud compute addresses list
|
||||
NAME REGION ADDRESS STATUS
|
||||
k8s-fw-default-http-ingress--32658fa96c080068 35.186.195.33 IN_USE
|
||||
```
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: http-ingress
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "gce"
|
||||
spec:
|
||||
backend:
|
||||
# This assumes http-svc exists and routes to healthy endpoints.
|
||||
serviceName: http-svc
|
||||
servicePort: 80
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: static-ip
|
||||
# Assumes a global static ip with the same name exists.
|
||||
# You can acquire a static IP by running
|
||||
# gcloud compute addresses create test-ip --global
|
||||
annotations:
|
||||
kubernetes.io/ingress.global-static-ip-name: "test-ip"
|
||||
kubernetes.io/ingress.class: "gce"
|
||||
spec:
|
||||
tls:
|
||||
# This assumes tls-secret exists.
|
||||
- secretName: tls-secret
|
||||
backend:
|
||||
# This assumes http-svc exists and routes to healthy endpoints.
|
||||
serviceName: http-svc
|
||||
servicePort: 80
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
# TLS termination
|
||||
|
||||
This example demonstrates how to terminate TLS through the GCE Ingress controller.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need a [TLS cert](/examples/PREREQUISITES.md#tls-certificates) and a [test HTTP service](/examples/PREREQUISITES.md#test-http-service) for this example.
|
||||
You will also need to make sure your Ingress targets exactly one Ingress
|
||||
controller by specifying the [ingress.class annotation](/examples/PREREQUISITES.md#ingress-class),
|
||||
and that you have an ingress controller [running](/examples/deployment) in your cluster.
|
||||
|
||||
## Deployment
|
||||
|
||||
The following command instructs the controller to terminate traffic using
|
||||
the provided TLS cert, and forward un-encrypted HTTP traffic to the test
|
||||
HTTP service.
|
||||
|
||||
```console
|
||||
$ kubectl create -f gce-tls-ingress.yaml
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
You can confirm that the Ingress works.
|
||||
|
||||
```console
|
||||
$ kubectl describe ing gce-test
|
||||
Name: gce-test
|
||||
Namespace: default
|
||||
Address: 35.186.221.137
|
||||
Default backend: http-svc:80 (10.180.1.9:8080,10.180.3.6:8080)
|
||||
TLS:
|
||||
tls-secret terminates
|
||||
Rules:
|
||||
Host Path Backends
|
||||
---- ---- --------
|
||||
* * http-svc:80 (10.180.1.9:8080,10.180.3.6:8080)
|
||||
Annotations:
|
||||
target-proxy: k8s-tp-default-gce-test--32658fa96c080068
|
||||
url-map: k8s-um-default-gce-test--32658fa96c080068
|
||||
backends: {"k8s-be-30301--32658fa96c080068":"Unknown"}
|
||||
forwarding-rule: k8s-fw-default-gce-test--32658fa96c080068
|
||||
https-forwarding-rule: k8s-fws-default-gce-test--32658fa96c080068
|
||||
https-target-proxy: k8s-tps-default-gce-test--32658fa96c080068
|
||||
static-ip: k8s-fw-default-gce-test--32658fa96c080068
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
2m 2m 1 {loadbalancer-controller } Normal ADD default/gce-test
|
||||
1m 1m 1 {loadbalancer-controller } Normal CREATE ip: 35.186.221.137
|
||||
1m 1m 3 {loadbalancer-controller } Normal Service default backend set to http-svc:30301
|
||||
|
||||
$ curl 35.186.221.137 -k
|
||||
curl 35.186.221.137 -L
|
||||
curl: (60) SSL certificate problem: self signed certificate
|
||||
More details here: http://curl.haxx.se/docs/sslcerts.html
|
||||
|
||||
$ curl 35.186.221.137 -kl
|
||||
CLIENT VALUES:
|
||||
client_address=10.240.0.3
|
||||
command=GET
|
||||
real path=/
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://35.186.221.137:8080/
|
||||
|
||||
SERVER VALUES:
|
||||
server_version=nginx: 1.9.11 - lua: 10001
|
||||
|
||||
HEADERS RECEIVED:
|
||||
accept=*/*
|
||||
connection=Keep-Alive
|
||||
host=35.186.221.137
|
||||
user-agent=curl/7.46.0
|
||||
via=1.1 google
|
||||
x-cloud-trace-context=bfa123130fd623989cca0192e43d9ba4/8610689379063045825
|
||||
x-forwarded-for=104.132.0.80, 35.186.221.137
|
||||
x-forwarded-proto=https
|
||||
```
|
|
@ -1,15 +0,0 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: test
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "gce"
|
||||
spec:
|
||||
tls:
|
||||
# This assumes tls-secret exists.
|
||||
- secretName: tls-secret
|
||||
backend:
|
||||
# This assumes http-svc exists and routes to healthy endpoints.
|
||||
serviceName: http-svc
|
||||
servicePort: 80
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
[[ $DEBUG ]] && set -x
|
||||
|
||||
set -eof pipefail
|
||||
|
||||
# include env
|
||||
. hack/e2e-internal/e2e-env.sh
|
||||
|
||||
echo "Destroying running docker containers..."
|
||||
# do not fail if the container is not running
|
||||
docker rm -f kubelet || true
|
||||
docker rm -f apiserver || true
|
||||
docker rm -f etcd || true
|
|
@ -1,21 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
[[ $DEBUG ]] && set -x
|
||||
|
||||
export ETCD_VERSION=3.0.14
|
||||
export K8S_VERSION=1.4.5
|
||||
|
||||
export PWD=`pwd`
|
||||
export BASEDIR="$(dirname ${BASH_SOURCE})"
|
||||
export KUBECTL="${BASEDIR}/kubectl"
|
||||
export GOOS="${GOOS:-linux}"
|
||||
|
||||
if [ ! -e ${KUBECTL} ]; then
|
||||
echo "kubectl binary is missing. downloading..."
|
||||
curl -sSL http://storage.googleapis.com/kubernetes-release/release/v${K8S_VERSION}/bin/${GOOS}/amd64/kubectl -o ${KUBECTL}
|
||||
chmod u+x ${KUBECTL}
|
||||
fi
|
||||
|
||||
${KUBECTL} config set-cluster travis --server=http://0.0.0.0:8080
|
||||
${KUBECTL} config set-context travis --cluster=travis
|
||||
${KUBECTL} config use-context travis
|
|
@ -1,11 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
[[ $DEBUG ]] && set -x
|
||||
|
||||
set -eof pipefail
|
||||
|
||||
# include env
|
||||
. hack/e2e-internal/e2e-env.sh
|
||||
|
||||
echo "Kubernetes information:"
|
||||
${KUBECTL} version
|
|
@ -1,55 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
[[ $DEBUG ]] && set -x
|
||||
|
||||
set -eof pipefail
|
||||
|
||||
# include env
|
||||
. hack/e2e-internal/e2e-env.sh
|
||||
|
||||
echo "Starting etcd..."
|
||||
docker run -d \
|
||||
--net=host \
|
||||
--name=etcd \
|
||||
quay.io/coreos/etcd:v$ETCD_VERSION
|
||||
|
||||
echo "Starting kubernetes..."
|
||||
|
||||
docker run -d --name=apiserver \
|
||||
--net=host \
|
||||
--pid=host \
|
||||
--privileged=true \
|
||||
gcr.io/google_containers/hyperkube:v${K8S_VERSION} \
|
||||
/hyperkube apiserver \
|
||||
--insecure-bind-address=0.0.0.0 \
|
||||
--service-cluster-ip-range=10.0.0.1/24 \
|
||||
--etcd_servers=http://127.0.0.1:4001 \
|
||||
--v=2
|
||||
|
||||
docker run -d --name=kubelet \
|
||||
--volume=/:/rootfs:ro \
|
||||
--volume=/sys:/sys:ro \
|
||||
--volume=/dev:/dev \
|
||||
--volume=/var/lib/docker/:/var/lib/docker:rw \
|
||||
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
|
||||
--volume=/var/run:/var/run:rw \
|
||||
--net=host \
|
||||
--pid=host \
|
||||
--privileged=true \
|
||||
gcr.io/google_containers/hyperkube:v${K8S_VERSION} \
|
||||
/hyperkube kubelet \
|
||||
--containerized \
|
||||
--hostname-override="0.0.0.0" \
|
||||
--address="0.0.0.0" \
|
||||
--cluster_dns=10.0.0.10 --cluster_domain=cluster.local \
|
||||
--api-servers=http://localhost:8080 \
|
||||
--config=/etc/kubernetes/manifests-multi
|
||||
|
||||
echo "waiting until api server is available..."
|
||||
until curl -o /dev/null -sIf http://0.0.0.0:8080; do \
|
||||
sleep 10;
|
||||
done;
|
||||
|
||||
echo "Kubernetes started"
|
||||
echo "Kubernetes information:"
|
||||
${KUBECTL} version
|
|
@ -1,3 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
echo "running ginkgo"
|
285
hack/e2e.go
|
@ -1,285 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// e2e.go runs the e2e test suite. No non-standard package dependencies; call with "go run".
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
build = flag.Bool("build", true, "Build the backends images indicated by the env var BACKENDS required to run e2e tests.")
|
||||
up = flag.Bool("up", true, "Creates a kubernetes cluster using hyperkube (containerized kubelet).")
|
||||
down = flag.Bool("down", true, "destroys the created cluster.")
|
||||
test = flag.Bool("test", true, "Run Ginkgo tests.")
|
||||
dump = flag.String("dump", "", "If set, dump cluster logs to this location on test or cluster-up failure")
|
||||
testArgs = flag.String("test-args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
|
||||
deployment = flag.String("deployment", "bash", "up/down mechanism")
|
||||
verbose = flag.Bool("v", false, "If true, print all command output.")
|
||||
)
|
||||
|
||||
func appendError(errs []error, err error) []error {
|
||||
if err != nil {
|
||||
return append(errs, err)
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
func validWorkingDirectory() error {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get pwd: %v", err)
|
||||
}
|
||||
acwd, err := filepath.Abs(cwd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert %s to an absolute path: %v", cwd, err)
|
||||
}
|
||||
if !strings.Contains(filepath.Base(acwd), "ingress") {
|
||||
return fmt.Errorf("must run from git root directory: %v", acwd)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type TestCase struct {
|
||||
XMLName xml.Name `xml:"testcase"`
|
||||
ClassName string `xml:"classname,attr"`
|
||||
Name string `xml:"name,attr"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
Failure string `xml:"failure,omitempty"`
|
||||
}
|
||||
|
||||
type TestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
Failures int `xml:"failures,attr"`
|
||||
Tests int `xml:"tests,attr"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
Cases []TestCase
|
||||
}
|
||||
|
||||
var suite TestSuite
|
||||
|
||||
func xmlWrap(name string, f func() error) error {
|
||||
start := time.Now()
|
||||
err := f()
|
||||
duration := time.Since(start)
|
||||
c := TestCase{
|
||||
Name: name,
|
||||
ClassName: "e2e.go",
|
||||
Time: duration.Seconds(),
|
||||
}
|
||||
if err != nil {
|
||||
c.Failure = err.Error()
|
||||
suite.Failures++
|
||||
}
|
||||
suite.Cases = append(suite.Cases, c)
|
||||
suite.Tests++
|
||||
return err
|
||||
}
|
||||
|
||||
func writeXML(start time.Time) {
|
||||
suite.Time = time.Since(start).Seconds()
|
||||
out, err := xml.MarshalIndent(&suite, "", " ")
|
||||
if err != nil {
|
||||
log.Fatalf("Could not marshal XML: %s", err)
|
||||
}
|
||||
path := filepath.Join(*dump, "junit_runner.xml")
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err := f.WriteString(xml.Header); err != nil {
|
||||
log.Fatalf("Error writing XML header: %s", err)
|
||||
}
|
||||
if _, err := f.Write(out); err != nil {
|
||||
log.Fatalf("Error writing XML data: %s", err)
|
||||
}
|
||||
log.Printf("Saved XML output to %s.", path)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
||||
flag.Parse()
|
||||
|
||||
if err := validWorkingDirectory(); err != nil {
|
||||
log.Fatalf("Called from invalid working directory: %v", err)
|
||||
}
|
||||
|
||||
deploy, err := getDeployer()
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating deployer: %v", err)
|
||||
}
|
||||
|
||||
if err := run(deploy); err != nil {
|
||||
log.Fatalf("Something went wrong: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func run(deploy deployer) error {
|
||||
if *dump != "" {
|
||||
defer writeXML(time.Now())
|
||||
}
|
||||
|
||||
if *build {
|
||||
if err := xmlWrap("Build", Build); err != nil {
|
||||
return fmt.Errorf("error building: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if *up {
|
||||
if err := xmlWrap("TearDown", deploy.Down); err != nil {
|
||||
return fmt.Errorf("error tearing down previous cluster: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
var errs []error
|
||||
|
||||
if *up {
|
||||
// If we tried to bring the cluster up, make a courtesy
|
||||
// attempt to bring it down so we're not leaving resources around.
|
||||
//
|
||||
// TODO: We should try calling deploy.Down exactly once. Though to
|
||||
// stop the leaking resources for now, we want to be on the safe side
|
||||
// and call it explicitly in defer if the other one is not called.
|
||||
if *down {
|
||||
defer xmlWrap("Deferred TearDown", deploy.Down)
|
||||
}
|
||||
// Start the cluster using this version.
|
||||
if err := xmlWrap("Up", deploy.Up); err != nil {
|
||||
return fmt.Errorf("starting e2e cluster: %s", err)
|
||||
}
|
||||
if *dump != "" {
|
||||
cmd := exec.Command("./cluster/kubectl.sh", "--match-server-version=false", "get", "nodes", "-oyaml")
|
||||
b, err := cmd.CombinedOutput()
|
||||
if *verbose {
|
||||
log.Printf("kubectl get nodes:\n%s", string(b))
|
||||
}
|
||||
if err == nil {
|
||||
if err := ioutil.WriteFile(filepath.Join(*dump, "nodes.yaml"), b, 0644); err != nil {
|
||||
errs = appendError(errs, fmt.Errorf("error writing nodes.yaml: %v", err))
|
||||
}
|
||||
} else {
|
||||
errs = appendError(errs, fmt.Errorf("error running get nodes: %v", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if *test {
|
||||
if err := xmlWrap("IsUp", deploy.IsUp); err != nil {
|
||||
errs = appendError(errs, err)
|
||||
} else {
|
||||
errs = appendError(errs, Test())
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 && *dump != "" {
|
||||
errs = appendError(errs, xmlWrap("DumpClusterLogs", func() error {
|
||||
return DumpClusterLogs(*dump)
|
||||
}))
|
||||
}
|
||||
|
||||
if *down {
|
||||
errs = appendError(errs, xmlWrap("TearDown", deploy.Down))
|
||||
}
|
||||
|
||||
if len(errs) != 0 {
|
||||
return fmt.Errorf("encountered %d errors: %v", len(errs), errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Build() error {
|
||||
// The build-release script needs stdin to ask the user whether
|
||||
// it's OK to download the docker image.
|
||||
cmd := exec.Command("make", "docker-build")
|
||||
cmd.Stdin = os.Stdin
|
||||
if err := finishRunning("build-release", cmd); err != nil {
|
||||
return fmt.Errorf("error building: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type deployer interface {
|
||||
Up() error
|
||||
IsUp() error
|
||||
SetupKubecfg() error
|
||||
Down() error
|
||||
}
|
||||
|
||||
func getDeployer() (deployer, error) {
|
||||
switch *deployment {
|
||||
case "bash":
|
||||
return bash{}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown deployment strategy %q", *deployment)
|
||||
}
|
||||
}
|
||||
|
||||
type bash struct{}
|
||||
|
||||
func (b bash) Up() error {
|
||||
return finishRunning("up", exec.Command("./hack/e2e-internal/e2e-up.sh"))
|
||||
}
|
||||
|
||||
func (b bash) IsUp() error {
|
||||
return finishRunning("get status", exec.Command("./hack/e2e-internal/e2e-status.sh"))
|
||||
}
|
||||
|
||||
func (b bash) SetupKubecfg() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b bash) Down() error {
|
||||
return finishRunning("teardown", exec.Command("./hack/e2e-internal/e2e-down.sh"))
|
||||
}
|
||||
|
||||
func DumpClusterLogs(location string) error {
|
||||
log.Printf("Dumping cluster logs to: %v", location)
|
||||
return finishRunning("dump cluster logs", exec.Command("./hack/e2e-internal/log-dump.sh", location))
|
||||
}
|
||||
|
||||
func Test() error {
|
||||
if *testArgs == "" {
|
||||
*testArgs = "--ginkgo.focus=\\[Feature:Ingress\\]"
|
||||
}
|
||||
return finishRunning("Ginkgo tests", exec.Command("./hack/e2e-internal/ginkgo-e2e.sh", strings.Fields(*testArgs)...))
|
||||
}
|
||||
|
||||
func finishRunning(stepName string, cmd *exec.Cmd) error {
|
||||
if *verbose {
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
}
|
||||
log.Printf("Running: %v", stepName)
|
||||
defer func(start time.Time) {
|
||||
log.Printf("Step '%s' finished in %s", stepName, time.Since(start))
|
||||
}(time.Now())
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("error running %v: %v", stepName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -19,7 +19,7 @@ package alias
|
|||
import (
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
|
||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||
"k8s.io/ingress-nginx/pkg/ingress/annotations/parser"
|
||||
)
|
||||
|
||||
const (
|
Some files were not shown because too many files have changed in this diff