diff --git a/.gitignore b/.gitignore
index 48939aefe..562090a30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,9 @@
 ._*
 .DS_Store
+# intellij files
+.idea/*
+
 # Eclipse files
 .classpath
 .project
diff --git a/.travis.yml b/.travis.yml
index 7cee6fed2..0b5767d5b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,7 +9,7 @@ notifications:
   email: true
 
 go:
-  - 1.8.1
+  - 1.8.3
 
 go_import_path: k8s.io/ingress
 
@@ -19,15 +19,19 @@ env:
   # docker run --rm caktux/travis-cli encrypt key=value -r kubernetes/ingress
   - RELEASE="ci-${TRAVIS_BUILD_ID}"
 
-install:
-  - go get github.com/golang/lint/golint
-  - go get github.com/mattn/goveralls
-  - go get github.com/modocache/gover
-  - if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-
 before_script:
   - export PATH=$PATH:$PWD/hack/e2e-internal/
 
-script:
-  - make fmt lint vet cover
-  #- make test-e2e
+jobs:
+  include:
+    - stage: Static Check
+      script:
+        - go get github.com/golang/lint/golint
+        - make fmt lint vet
+    - stage: Coverage
+      script:
+        - go get github.com/mattn/goveralls
+        - go get github.com/modocache/gover
+        - if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+        - make cover
+#- make test-e2e
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5f7ff1f91..cfa44abda 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -19,14 +19,14 @@ Follow either of the two links above to access the appropriate CLA and instructi
 
 If you're new to the project and want to help, but don't know where to start, we have a semi-curated list of issues that should not need deep knowledge of the system. [Have a look and see if anything sounds interesting](https://github.com/kubernetes/ingress/issues?utf8=%E2%9C%93&q=is%3Aopen%20is%3Aissue%20label%3A%22help+wanted%22). Alternatively, read some of the docs on other controllers and try to write your own, file and fix any/all issues that come up, including gaps in documentation!
 
-## Contributing A Patch
+## Contributing a Patch
 
 1. If you haven't already done so, sign a Contributor License Agreement (see details above).
-1. Read the [Ingress development guide](docs/dev/README.md)
+1. Read the [Ingress development guide](docs/dev/README.md).
 1. Fork the desired repo, develop and test your code changes.
 1. Submit a pull request.
 
-All changes must be code reviewed. Coding conventions and standards are explained in the official [developer docs](https://github.com/kubernetes/kubernetes/tree/master/docs/devel). Expect reviewers to request that you avoid common [go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments) in your PRs.
+All changes must be code reviewed. Coding conventions and standards are explained in the official [developer docs](https://github.com/kubernetes/community/tree/master/contributors/devel). Expect reviewers to request that you avoid common [go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments) in your PRs.
 
 ### Merge Approval
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index d12fd9936..ea37dea66 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -6,11 +6,6 @@
 		"./..."
], "Deps": [ - { - "ImportPath": "bitbucket.org/ww/goautoneg", - "Comment": "null-5", - "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" - }, { "ImportPath": "cloud.google.com/go/compute/metadata", "Comment": "v0.1.0-115-g3b1ae45", @@ -40,7 +35,8 @@ }, { "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" + "Comment": "v1.1.0-1-g782f496", + "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, { "ImportPath": "github.com/dgrijalva/jwt-go", @@ -49,12 +45,12 @@ }, { "ImportPath": "github.com/docker/distribution/digest", - "Comment": "v2.4.0-rc.1-38-gcd27f17", + "Comment": "v2.4.0-rc.1-38-gcd27f179", "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.4.0-rc.1-38-gcd27f17", + "Comment": "v2.4.0-rc.1-38-gcd27f179", "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51" }, { @@ -80,10 +76,6 @@ "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, - { - "ImportPath": "github.com/go-openapi/analysis", - "Rev": "b44dc874b601d9e4e2f6e19140e794ba24bead3b" - }, { "ImportPath": "github.com/go-openapi/jsonpointer", "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" @@ -92,10 +84,6 @@ "ImportPath": "github.com/go-openapi/jsonreference", "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" }, - { - "ImportPath": "github.com/go-openapi/loads", - "Rev": "18441dfa706d924a39a030ee2c3b1d8d81917b38" - }, { "ImportPath": "github.com/go-openapi/spec", "Rev": "6aced65f8501fe1217321abf0749d354824ba2ff" @@ -126,10 +114,42 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" }, + { + "ImportPath": "github.com/golang/protobuf/ptypes", + "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/any", + "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/duration", + "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", + "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" }, + { + "ImportPath": "github.com/googleapis/gnostic/OpenAPIv2", + "Rev": "68f4ded48ba9414dab2ae69b3f0d69971da73aa5" + }, + { + "ImportPath": "github.com/googleapis/gnostic/compiler", + "Rev": "68f4ded48ba9414dab2ae69b3f0d69971da73aa5" + }, + { + "ImportPath": "github.com/googleapis/gnostic/extensions", + "Rev": "68f4ded48ba9414dab2ae69b3f0d69971da73aa5" + }, + { + "ImportPath": "github.com/gorilla/websocket", + "Rev": "6eb6ad425a89d9da7a5549bc6da8f79ba5c17844" + }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" @@ -140,7 +160,7 @@ }, { "ImportPath": "github.com/howeyc/gopass", - "Rev": "3ca23474a7c7203e0a0a070fd33508f6efdb9b3d" + "Rev": "bf9dde6d0d2c004a008c27aaee91170c786f6db8" }, { "ImportPath": "github.com/imdario/mergo", @@ -149,7 +169,7 @@ }, { "ImportPath": "github.com/juju/ratelimit", - "Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177" + "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" }, { "ImportPath": "github.com/kylelemons/godebug/diff", @@ -175,9 +195,13 @@ "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" }, + { + "ImportPath": "github.com/mitchellh/go-ps", + "Rev": "4fdf99ab29366514c69ccccddab5dc58b8d84062" + }, { "ImportPath": 
"github.com/mitchellh/mapstructure", - "Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf" + "Rev": "53818660ed4955e899c0bcafa97299a388bd7c8e" }, { "ImportPath": "github.com/ncabatoff/process-exporter", @@ -191,7 +215,7 @@ }, { "ImportPath": "github.com/paultag/sniff/parser", - "Rev": "558797aed1e6daa735d8fada0b863b89d72dcfba" + "Rev": "87325c3dddf408cfb71f5044873d34ac426d5a59" }, { "ImportPath": "github.com/pborman/uuid", @@ -204,18 +228,18 @@ }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "v0.8.0-81-g42552c1", - "Rev": "42552c195dd3f3089fbf9cf26e139da150af35aa" + "Comment": "v0.8.0-83-ge7e9030", + "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" }, { "ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp", - "Comment": "v0.8.0-81-g42552c1", - "Rev": "42552c195dd3f3089fbf9cf26e139da150af35aa" + "Comment": "v0.8.0-83-ge7e9030", + "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" }, { "ImportPath": "github.com/prometheus/client_model/go", - "Comment": "model-0.0.2-14-g6f38060", - "Rev": "6f3806018612930941127f2a7c6c453ba2c527d2" + "Comment": "model-0.0.2-12-gfa8ad6f", + "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" }, { "ImportPath": "github.com/prometheus/common/expfmt", @@ -337,6 +361,14 @@ "ImportPath": "golang.org/x/text/width", "Rev": "2910a502d2bf9e43193af9d68ca516529614eed3" }, + { + "ImportPath": "google.golang.org/api/cloudkms/v1", + "Rev": "e3824ed33c72bf7e81da0286772c34b987520914" + }, + { + "ImportPath": "google.golang.org/api/compute/v0.alpha", + "Rev": "e3824ed33c72bf7e81da0286772c34b987520914" + }, { "ImportPath": "google.golang.org/api/compute/v0.beta", "Rev": "e3824ed33c72bf7e81da0286772c34b987520914" @@ -363,1151 +395,1142 @@ }, { "ImportPath": "gopkg.in/fsnotify.v1", - "Comment": "v1.2.9", - "Rev": "8611c35ab31c1c28aa903d33cf8b6e44a399b09e" + "Comment": "v1.4.2", + "Rev": "629574ca2a5df945712d3079857300b5e4da0236" }, { "ImportPath": "gopkg.in/gcfg.v1", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/gcfg.v1/scanner", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/gcfg.v1/token", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/gcfg.v1/types", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/inf.v0", "Comment": "v0.9.0", "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" }, + { + "ImportPath": "gopkg.in/warnings.v0", + "Comment": "v0.1.1", + "Rev": "8a331561fe74dadba6edfc59f3be66c22c3b065d" + }, { "ImportPath": "gopkg.in/yaml.v2", "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" }, + { + "ImportPath": "k8s.io/api/admissionregistration/v1alpha1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/apps/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/apps/v1beta2", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/authentication/v1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/authentication/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/authorization/v1", + "Rev": 
"8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/authorization/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/autoscaling/v1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/autoscaling/v2alpha1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/batch/v1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/batch/v2alpha1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/certificates/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/core/v1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/extensions/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/networking/v1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/policy/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/rbac/v1alpha1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/rbac/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/scheduling/v1alpha1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/settings/v1alpha1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/storage/v1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, + { + "ImportPath": "k8s.io/api/storage/v1beta1", + "Rev": "8dbd8c21124dfb73fba82e2e85fe5f09b48ed89a" + }, { "ImportPath": "k8s.io/apimachinery/pkg/api/equality", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/errors", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/meta", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/validation", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/apimachinery", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/apimachinery/announced", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/apimachinery/registered", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/validation", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": 
"dc1f89aff9a7509782bde3b68824c8043a3e58cc" + }, + { + "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1alpha1", + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion/queryparams", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" + }, + { + "ImportPath": "k8s.io/apimachinery/pkg/conversion/unstructured", + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/fields", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/labels", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/openapi", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/schema", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/json", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/selection", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/types", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" + }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/cache", + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" + }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/clock", + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/diff", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/errors", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/framer", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": 
"k8s.io/apimachinery/pkg/util/intstr", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/json", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/net", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/rand", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/runtime", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/sets", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/uuid", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation/field", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/wait", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/yaml", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/version", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/pkg/watch", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "Rev": "7579408ece3cd0671ed2440d1f89616dd5f13bc3" + "Rev": "dc1f89aff9a7509782bde3b68824c8043a3e58cc" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/authenticator", - "Rev": "f71dd4d00dcfe99bf1d43ffda33b31e45db09c75" + "Rev": "7eb7a42b3f4b3aaeccab8fdf89095bb6bea20695" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/serviceaccount", - "Rev": "f71dd4d00dcfe99bf1d43ffda33b31e45db09c75" + "Rev": "7eb7a42b3f4b3aaeccab8fdf89095bb6bea20695" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/user", - "Rev": "f71dd4d00dcfe99bf1d43ffda33b31e45db09c75" + "Rev": "7eb7a42b3f4b3aaeccab8fdf89095bb6bea20695" }, { "ImportPath": "k8s.io/apiserver/pkg/features", - "Rev": "f71dd4d00dcfe99bf1d43ffda33b31e45db09c75" + "Rev": "7eb7a42b3f4b3aaeccab8fdf89095bb6bea20695" }, { "ImportPath": "k8s.io/apiserver/pkg/server/healthz", - "Rev": 
"f71dd4d00dcfe99bf1d43ffda33b31e45db09c75" + "Rev": "7eb7a42b3f4b3aaeccab8fdf89095bb6bea20695" }, { "ImportPath": "k8s.io/apiserver/pkg/util/feature", - "Rev": "f71dd4d00dcfe99bf1d43ffda33b31e45db09c75" + "Rev": "7eb7a42b3f4b3aaeccab8fdf89095bb6bea20695" }, { "ImportPath": "k8s.io/client-go/discovery", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/discovery/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/admissionregistration", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/apps", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/apps/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/apps/v1beta2", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/autoscaling", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/autoscaling/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/autoscaling/v2alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/batch", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/batch/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/batch/v2alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/certificates", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/certificates/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/core", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/core/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/extensions", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": 
"k8s.io/client-go/informers/extensions/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/internalinterfaces", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/networking", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/networking/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/policy", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/policy/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/rbac", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/rbac/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/rbac/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/scheduling", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/scheduling/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/settings", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/settings/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/storage", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/storage/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/informers/storage/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/scheme", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": 
"k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", - 
"Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" 
+ }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1/fake", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1/fake", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", - "Comment": 
"v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/apps/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/apps/v1beta2", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/autoscaling/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/autoscaling/v2alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/batch/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/batch/v2alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/listers/certificates/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/listers/core/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/api", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/extensions/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/api/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/networking/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/api/v1/ref", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/policy/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/apis/apps", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/rbac/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/apis/apps/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/rbac/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/apis/authentication", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/scheduling/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", 
+ "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/apis/authentication/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/settings/v1alpha1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/apis/authentication/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/storage/v1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/pkg/apis/authorization", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/authorization/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/authorization/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/batch", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/batch/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/batch/v2alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/certificates", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/certificates/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/extensions", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/extensions/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/policy", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/policy/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/rbac", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/rbac/v1alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/rbac/v1beta1", - 
"Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/settings", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/settings/v1alpha1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/storage", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/storage/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/apis/storage/v1beta1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/util", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" - }, - { - "ImportPath": "k8s.io/client-go/pkg/util/parsers", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/listers/storage/v1beta1", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/pkg/version", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/rest", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/rest/watch", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/testing", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/auth", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/cache", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/tools/cache/testing", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc9", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/latest", - "Comment": 
"v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/v1", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/tools/leaderelection", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/tools/leaderelection/resourcelock", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/metrics", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/tools/record", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/client-go/tools/reference", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/transport", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/util/cert", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { - "ImportPath": "k8s.io/client-go/util/clock", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "ImportPath": "k8s.io/client-go/util/cert/triple", + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc9", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/util/homedir", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/util/integer", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" }, { "ImportPath": "k8s.io/client-go/util/workqueue", - "Comment": "v2.0.0-alpha.0-301-g1f73522", - "Rev": "1f73522a95aff1dc043c593cda8eed3cedc2edd8" + "Comment": "v2.0.0-alpha.0-488-gc7ed6bc", + "Rev": "c7ed6bc9c1c981e0f0bd09dc046c9b81ab855c24" + }, + { + "ImportPath": "k8s.io/kube-openapi/pkg/common", + "Rev": "80f07ef71bb4f781233c65aa8d0369e4ecafab87" }, { "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/api/helper", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/util", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/helper", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/pod", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1/ref", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/service", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/validation", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": 
"k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "Comment": 
"v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/settings", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1", - "Comment": 
"v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/retry", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": 
"97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/features", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis", + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/master/ports", + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/exec", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "ImportPath": "k8s.io/kubernetes/pkg/util/file", + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/io", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/pointer", + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + 
"Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/taints", + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/version", + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util", - "Comment": "v1.7.0-alpha.4-155-g97c2b70fe3", - "Rev": "97c2b70fe316d1368169c193e72f201f0cc33d47" + "Comment": "v1.8.0-alpha.2-1030-ga1c0510d00", + "Rev": "a1c0510d006ccff9be8478f86635c86658c9bf73" + }, + { + "ImportPath": "k8s.io/utils/exec", + "Rev": "9fdc871a36f37980dd85f96d576b20d564cc0784" } ] } diff --git a/Makefile b/Makefile index ce21c9178..2c1188922 100644 --- a/Makefile +++ b/Makefile @@ -61,11 +61,15 @@ controllers: .PHONY: docker-build docker-build: - make -C controllers/nginx container + make -C controllers/nginx all-container .PHONY: docker-push docker-push: - make -C controllers/nginx push + make -C controllers/nginx all-push + +.PHONE: release +release: + make -C controllers/nginx release .PHONY: ginkgo ginkgo: diff --git a/README.md b/README.md index c51815b17..cc19b9a72 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Build Status](https://travis-ci.org/kubernetes/ingress.svg?branch=master)](https://travis-ci.org/kubernetes/ingress) [![Coverage Status](https://coveralls.io/repos/github/kubernetes/ingress/badge.svg?branch=master)](https://coveralls.io/github/kubernetes/ingress) [![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes/ingress)](https://goreportcard.com/report/github.com/kubernetes/ingress) +[![GoDoc](https://godoc.org/github.com/kubernetes/ingress?status.svg)](https://godoc.org/github.com/kubernetes/ingress) ## Description diff --git a/controllers/README.md b/controllers/README.md index e62cf55b6..e27700d0e 100644 --- a/controllers/README.md +++ b/controllers/README.md @@ -1,11 +1,11 @@ # Ingress controllers -This directory contains ingress controllers. -======= -# Ingress Controllers +This directory contains Ingress controllers. -Configuring a webserver or loadbalancer is harder than it should be. Most webserver configuration files are very similar. There are some applications that have weird little quirks that tend to throw a wrench in things, but for the most part you can apply the same logic to them and achieve a desired result. The Ingress resource embodies this idea, and an Ingress controller is meant to handle all the quirks associated with a specific "class" of Ingress (be it a single instance of a loadbalancer, or a more complicated setup of frontends that provide GSLB, DDoS protection etc). +Configuring a webserver or loadbalancer is harder than it should be. Most webserver configuration files are very similar. There are some applications that have weird little quirks that tend to throw a wrench in things, but for the most part you can apply the same logic to them and achieve a desired result. 
+ +The Ingress resource embodies this idea, and an Ingress controller is meant to handle all the quirks associated with a specific "class" of Ingress (be it a single instance of a loadbalancer, or a more complicated setup of frontends that provide GSLB, DDoS protection, etc). ## What is an Ingress Controller? -An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the apiserver's `/ingresses` endpoint for updates to the [Ingress resource](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/ingress.md). Its job is to satisfy requests for ingress. +An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the apiserver's `/ingresses` endpoint for updates to the [Ingress resource](https://kubernetes.io/docs/concepts/services-networking/ingress/). Its job is to satisfy requests for Ingresses. diff --git a/controllers/gce/BETA_LIMITATIONS.md b/controllers/gce/BETA_LIMITATIONS.md index f0f479a1b..e2d92a3fe 100644 --- a/controllers/gce/BETA_LIMITATIONS.md +++ b/controllers/gce/BETA_LIMITATIONS.md @@ -1,6 +1,6 @@ # GLBC: Beta limitations -As of the Kubernetes 1.2 release, the GCE L7 Loadbalancer controller is still a *beta* product. We expect it to go GA in 1.3. +As of the Kubernetes 1.7 release, the GCE L7 Loadbalancer controller is still a *beta* product. This is a list of beta limitations: @@ -13,13 +13,13 @@ This is a list of beta limitations: * [Large clusters](#large-clusters): Ingress on GCE isn't supported on large (>1000 nodes), single-zone clusters. * [Teardown](README.md#deletion): The recommended way to tear down a cluster with active Ingresses is to either delete each Ingress, or hit the `/delete-all-and-quit` endpoint on GLBC, before invoking a cluster teardown script (eg: kube-down.sh). You will have to manually cleanup GCE resources through the [cloud console](https://cloud.google.com/compute/docs/console#access) or [gcloud CLI](https://cloud.google.com/compute/docs/gcloud-compute/) if you simply tear down the cluster with active Ingresses. * [Changing UIDs](#changing-the-cluster-uid): You can change the UID used as a suffix for all your GCE cloud resources, but this requires you to delete existing Ingresses first. -* [Cleaning up](#cleaning-up-cloud-resources): You can delete loadbalancers that older clusters might've leaked due to permature teardown through the GCE console. +* [Cleaning up](#cleaning-up-cloud-resources): You can delete loadbalancers that older clusters might've leaked due to premature teardown through the GCE console. ## Prerequisites Before you can receive traffic through the GCE L7 Loadbalancer Controller you need: * A Working Kubernetes cluster >= 1.1 -* At least 1 Kubernetes [NodePort Service](../../../../docs/user-guide/services.md#type-nodeport) (this is the endpoint for your Ingress) +* At least 1 Kubernetes NodePort Service (this is the endpoint for your Ingress) * A single instance of the L7 Loadbalancer Controller pod, if you're running Kubernetes < 1.3 (the GCP ingress controller runs on the master in later versions) ## Quota @@ -172,7 +172,6 @@ If you deleted a GKE/GCE cluster without first deleting the associated Ingresses 1. Navigate to the [cloud console](https://console.cloud.google.com/) and click on the "Networking" tab, then choose "LoadBalancing" 2. Find the loadbalancer you'd like to delete, it should have a name formatted as: k8s-um-ns-name--UUID -3. Delete it, check the boxes to also casade the deletion down to associated resources (eg: backend-services) +3. 
Delete it, check the boxes to also cascade the deletion down to associated resources (eg: backend-services) 4. Switch to the "Compute Engine" tab, then choose "Instance Groups" 5. Delete the Instance Group allocated for the leaked Ingress, it should have a name formatted as: k8s-ig-UUID - diff --git a/controllers/gce/Makefile b/controllers/gce/Makefile index 158058422..02d8e97fe 100644 --- a/controllers/gce/Makefile +++ b/controllers/gce/Makefile @@ -1,7 +1,7 @@ all: push # 0.0 shouldn't clobber any released builds -TAG = 0.9.4 +TAG = 0.9.6 PREFIX = gcr.io/google_containers/glbc server: diff --git a/controllers/gce/README.md b/controllers/gce/README.md index eac403a31..8f1498dbd 100644 --- a/controllers/gce/README.md +++ b/controllers/gce/README.md @@ -53,7 +53,7 @@ __Lines 8-9__: Each http rule contains the following information: A host (eg: fo __Lines 10-12__: A `backend` is a service:port combination. It selects a group of pods capable of servicing traffic sent to the path specified in the parent rule. The `port` is the desired `spec.ports[*].port` from the Service Spec -- Note, though, that the L7 actually directs traffic to the corresponding `NodePort`. -__Global Prameters__: For the sake of simplicity the example Ingress has no global parameters. However, one can specify a default backend (see examples below) in the absence of which requests that don't match a path in the spec are sent to the default backend of glbc. Though glbc doesn't support HTTPS yet, security configs would also be global. +__Global Parameters__: For the sake of simplicity the example Ingress has no global parameters. However, one can specify a default backend (see examples below) in the absence of which requests that don't match a path in the spec are sent to the default backend of glbc. ## Load Balancer Management @@ -135,7 +135,7 @@ Go to your GCE console and confirm that the following resources have been create * BackendServices (one for each Kubernetes nodePort service) * An Instance Group (with ports corresponding to the BackendServices) -The HTTPLoadBalancing panel will also show you if your backends have responded to the health checks, wait till they do. This can take a few minutes. If you see `Health status will display here once configuration is complete.` the L7 is still bootstrapping. Wait till you have `Healthy instances: X`. Even though the GCE L7 is driven by our controller, which notices the Kubernetes healtchecks of a pod, we still need to wait on the first GCE L7 health check to complete. Once your backends are up and healthy: +The HTTPLoadBalancing panel will also show you if your backends have responded to the health checks, wait till they do. This can take a few minutes. If you see `Health status will display here once configuration is complete.` the L7 is still bootstrapping. Wait till you have `Healthy instances: X`. Even though the GCE L7 is driven by our controller, which notices the Kubernetes healthchecks of a pod, we still need to wait on the first GCE L7 health check to complete. 
Once your backends are up and healthy: ```shell $ curl --resolve foo.bar.com:80:107.178.245.239 http://foo.bar.com/foo @@ -245,7 +245,7 @@ spec: app: nginxtest ``` -Running kubectl create against this manifest will given you a service with multiple endpoints: +Running kubectl create against this manifest will give you a service with multiple endpoints: ```shell $ kubectl get svc nginxtest -o yaml | grep -i nodeport: nodePort: 30404 @@ -281,7 +281,7 @@ nginx-tester-pod-name ``` Note what just happened, the endpoint exposes /hostname, and the loadbalancer forwarded the entire matching url to the endpoint. This means if you had '/foo' in the Ingress and tried accessing /hostname, your endpoint would've received /foo/hostname and not known how to route it. Now update the Ingress to access static content via the /fs endpoint: -``` +```yaml apiVersion: extensions/v1beta1 kind: Ingress metadata: @@ -300,7 +300,7 @@ As before, wait a while for the update to take effect, and try accessing `loadba #### Deletion -Most production loadbalancers live as long as the nodes in the cluster and are torn down when the nodes are destroyed. That said, there are plenty of use cases for deleting an Ingress, deleting a loadbalancer controller, or just purging external loadbalancer resources alltogether. Deleting a loadbalancer controller pod will not affect the loadbalancers themselves, this way your backends won't suffer a loss of availability if the scheduler pre-empts your controller pod. Deleting a single loadbalancer is as easy as deleting an Ingress via kubectl: +Most production loadbalancers live as long as the nodes in the cluster and are torn down when the nodes are destroyed. That said, there are plenty of use cases for deleting an Ingress, deleting a loadbalancer controller, or just purging external loadbalancer resources altogether. Deleting a loadbalancer controller pod will not affect the loadbalancers themselves; this way your backends won't suffer a loss of availability if the scheduler pre-empts your controller pod. Deleting a single loadbalancer is as easy as deleting an Ingress via kubectl: ```shell $ kubectl delete ing echomap $ kubectl logs --follow glbc-6m6b6 l7-lb-controller @@ -313,7 +313,7 @@ I1007 00:26:02.043188 1 backends.go:134] Deleting backend k8-be-30301 I1007 00:26:05.591140 1 backends.go:134] Deleting backend k8-be-30284 I1007 00:26:09.159016 1 controller.go:232] Finished syncing default/echomap ``` -Note that it takes ~30 seconds to purge cloud resources, the API calls to create and delete are a one time cost. GCE BackendServices are ref-counted and deleted by the controller as you delete Kubernetes Ingress'. This is not sufficient for cleanup, because you might have deleted the Ingress while glbc was down, in which case it would leak cloud resources. You can delete the glbc and purge cloud resources in 2 more ways: +Note that it takes ~30 seconds to purge cloud resources; the API calls to create and delete are a one-time cost. GCE BackendServices are ref-counted and deleted by the controller as you delete Kubernetes Ingress'. This is not sufficient for cleanup, because you might have deleted the Ingress while glbc was down, in which case it would leak cloud resources. You can delete the glbc and purge cloud resources in 2 more ways: __The dev/test way__: If you want to delete everything in the cloud when the loadbalancer controller pod dies, start it with the --delete-all-on-quit flag.
When a pod is killed it's first sent a SIGTERM, followed by a grace period (set to 10minutes for loadbalancer controllers), followed by a SIGKILL. The controller pod uses this time to delete cloud resources. Be careful with --delete-all-on-quit, because if you're running a production glbc and the scheduler re-schedules your pod for some reason, it will result in a loss of availability. You can do this because your rc.yaml has: ```yaml @@ -327,7 +327,7 @@ So simply delete the replication controller: $ kubectl get rc glbc CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE glbc default-http-backend gcr.io/google_containers/defaultbackend:1.0 k8s-app=glbc,version=v0.5 1 2m - l7-lb-controller gcr.io/google_containers/glbc:0.9.4 + l7-lb-controller gcr.io/google_containers/glbc:0.9.6 $ kubectl delete rc glbc replicationcontroller "glbc" deleted @@ -339,7 +339,7 @@ glbc-6m6b6 1/1 Terminating 0 13m __The prod way__: If you didn't start the controller with `--delete-all-on-quit`, you can execute a GET on the `/delete-all-and-quit` endpoint. This endpoint is deliberately not exported. -``` +```shell $ kubectl exec -it glbc-6m6b6 -- wget -q -O- http://localhost:8081/delete-all-and-quit ..Hangs till quit is done.. @@ -399,7 +399,7 @@ spec: This creates 2 GCE forwarding rules that use a single static ip. Both `:80` and `:443` will direct traffic to your backend, which serves HTTP requests on the target port mentioned in the Service associated with the Ingress. ## Backend HTTPS -For encrypted communication between the load balancer and your Kubernetes service, you need to decorate the the service's port as expecting HTTPS. There's an alpha [Service annotation](examples/backside_https/app.yaml) for specifying the expected protocol per service port. Upon seeing the protocol as HTTPS, the ingress controller will assemble a GCP L7 load balancer with an HTTPS backend-service with a HTTPS health check. +For encrypted communication between the load balancer and your Kubernetes service, you need to decorate the service's port as expecting HTTPS. There's an alpha [Service annotation](examples/backside_https/app.yaml) for specifying the expected protocol per service port. Upon seeing the protocol as HTTPS, the ingress controller will assemble a GCP L7 load balancer with an HTTPS backend-service with an HTTPS health check. The annotation value is a stringified JSON map of port-name to "HTTPS" or "HTTP". If you do not specify the port, "HTTP" is assumed. ```yaml @@ -698,7 +698,7 @@ The controller manages cloud resources through a notion of pools. Each pool is t Periodically, each pool checks that it has a valid connection to the next hop in the above resource graph. So for example, the backend pool will check that each backend is connected to the instance group and that the node ports match, the instance group will check that all the Kubernetes nodes are a part of the instance group, and so on. Since Backends are a limited resource, they're shared (well, everything is limited by your quota, this applies doubly to backend services). This means you can setup N Ingress' exposing M services through different paths and the controller will only create M backends. When all the Ingress' are deleted, the backend pool GCs the backend.
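The ref-counted sharing described above is the invariant the backend pool maintains: no matter how many Ingresses reference a service port, at most one BackendService exists for it, and it is garbage-collected only when the last reference disappears. A minimal sketch of that bookkeeping, using toy types and names rather than the controller's actual pool implementation:

```go
package main

import "fmt"

// backendPool is a toy model of ref-counted backend sharing: each service
// port maps to exactly one backend, however many Ingresses reference it.
type backendPool struct {
	refs map[int64]int // port -> number of Ingresses referencing it
}

func (p *backendPool) add(port int64) {
	if p.refs[port] == 0 {
		fmt.Printf("creating backend for port %d\n", port)
	}
	p.refs[port]++
}

func (p *backendPool) remove(port int64) {
	if p.refs[port] == 0 {
		return // nothing to release
	}
	p.refs[port]--
	if p.refs[port] == 0 {
		fmt.Printf("GC: deleting backend for port %d\n", port)
		delete(p.refs, port)
	}
}

func main() {
	pool := &backendPool{refs: map[int64]int{}}
	pool.add(30301)    // first Ingress: backend is created
	pool.add(30301)    // second Ingress: backend is shared, not recreated
	pool.remove(30301) // backend survives: still referenced
	pool.remove(30301) // last reference gone: backend is GC'd
}
```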
-## Wishlist: +## Wish list: * More E2e, integration tests * Better events diff --git a/controllers/gce/backends/backends.go b/controllers/gce/backends/backends.go index 5c66b3dfa..49b632efe 100644 --- a/controllers/gce/backends/backends.go +++ b/controllers/gce/backends/backends.go @@ -26,10 +26,11 @@ import ( "github.com/golang/glog" compute "google.golang.org/api/compute/v1" + + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" - api_v1 "k8s.io/client-go/pkg/api/v1" "k8s.io/ingress/controllers/gce/healthchecks" "k8s.io/ingress/controllers/gce/instances" @@ -161,7 +162,7 @@ func (b *Backends) Init(pp probeProvider) { // Get returns a single backend. func (b *Backends) Get(port int64) (*compute.BackendService, error) { - be, err := b.cloud.GetBackendService(b.namer.BeName(port)) + be, err := b.cloud.GetGlobalBackendService(b.namer.BeName(port)) if err != nil { return nil, err } @@ -171,13 +172,22 @@ func (b *Backends) Get(port int64) (*compute.BackendService, error) { func (b *Backends) ensureHealthCheck(sp ServicePort) (string, error) { hc := b.healthChecker.New(sp.Port, sp.Protocol) - if b.prober != nil { + + existingLegacyHC, err := b.healthChecker.GetLegacy(sp.Port) + if err != nil && !utils.IsNotFoundError(err) { + return "", err + } + + if existingLegacyHC != nil { + glog.V(4).Infof("Applying settings of existing health check to newer health check on port %+v", sp) + applyLegacyHCToHC(existingLegacyHC, hc) + } else if b.prober != nil { probe, err := b.prober.GetProbe(sp) if err != nil { return "", err } if probe != nil { - glog.V(2).Infof("Applying httpGet settings of readinessProbe to health check on port %+v", sp) + glog.V(4).Infof("Applying httpGet settings of readinessProbe to health check on port %+v", sp) applyProbeSettingsToHC(probe, hc) } } @@ -194,22 +204,29 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp Servic Port: namedPort.Port, PortName: namedPort.Name, } - if err := b.cloud.CreateBackendService(bs); err != nil { + if err := b.cloud.CreateGlobalBackendService(bs); err != nil { return nil, err } return b.Get(namedPort.Port) } // Add will get or create a Backend for the given port. -func (b *Backends) Add(p ServicePort) error { +// Uses the given instance groups if non-nil, else creates instance groups. +func (b *Backends) Add(p ServicePort, igs []*compute.InstanceGroup) error { // We must track the port even if creating the backend failed, because // we might've created a health-check for it. be := &compute.BackendService{} defer func() { b.snapshotter.Add(portKey(p.Port), be) }() - igs, namedPort, err := b.nodePool.AddInstanceGroup(b.namer.IGName(), p.Port) - if err != nil { - return err + var err error + // Ideally callers should pass the instance groups to prevent recomputing them here. + // Igs can be nil in scenarios where we do not have instance groups such as + // while syncing default backend service. 
+ if igs == nil { + igs, _, err = instances.EnsureInstanceGroupsAndPorts(b.nodePool, b.namer, p.Port) + if err != nil { + return err + } } // Ensure health check for backend service exists @@ -222,6 +239,7 @@ func (b *Backends) Add(p ServicePort) error { pName := b.namer.BeName(p.Port) be, _ = b.Get(p.Port) if be == nil { + namedPort := utils.GetNamedPort(p.Port) glog.V(2).Infof("Creating backend service for port %v named port %v", p.Port, namedPort) be, err = b.create(namedPort, hcLink, p, pName) if err != nil { @@ -239,7 +257,7 @@ func (b *Backends) Add(p ServicePort) error { be.Protocol = string(p.Protocol) be.HealthChecks = []string{hcLink} be.Description = p.Description() - if err = b.cloud.UpdateBackendService(be); err != nil { + if err = b.cloud.UpdateGlobalBackendService(be); err != nil { return err } } @@ -247,7 +265,7 @@ func (b *Backends) Add(p ServicePort) error { // If previous health check was legacy type, we need to delete it. if existingHCLink != hcLink && strings.Contains(existingHCLink, "/httpHealthChecks/") { if err = b.healthChecker.DeleteLegacy(p.Port); err != nil { - return err + glog.Warningf("Failed to delete legacy HttpHealthCheck %v; Will not try again, err: %v", pName, err) } } @@ -273,7 +291,7 @@ func (b *Backends) Delete(port int64) (err error) { } }() // Try deleting health checks even if a backend is not found. - if err = b.cloud.DeleteBackendService(name); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + if err = b.cloud.DeleteGlobalBackendService(name); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) { return err } @@ -285,7 +303,7 @@ func (b *Backends) List() ([]interface{}, error) { // TODO: for consistency with the rest of this sub-package this method // should return a list of backend ports. interList := []interface{}{} - be, err := b.cloud.ListBackendServices() + be, err := b.cloud.ListGlobalBackendServices() if err != nil { return interList, err } @@ -352,7 +370,7 @@ func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGr newBackends := getBackendsForIGs(addIGs, bm) be.Backends = append(originalBackends, newBackends...) - if err := b.cloud.UpdateBackendService(be); err != nil { + if err := b.cloud.UpdateGlobalBackendService(be); err != nil { if utils.IsHTTPErrorCode(err, http.StatusBadRequest) { glog.V(2).Infof("Updating backend service backends with balancing mode %v failed, will try another mode. err:%v", bm, err) errs = append(errs, err.Error()) @@ -371,12 +389,12 @@ func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGr } // Sync syncs backend services corresponding to ports in the given list. -func (b *Backends) Sync(svcNodePorts []ServicePort) error { +func (b *Backends) Sync(svcNodePorts []ServicePort, igs []*compute.InstanceGroup) error { glog.V(3).Infof("Sync: backends %v", svcNodePorts) // create backends for new ports, perform an edge hop for existing ports for _, port := range svcNodePorts { - if err := b.Add(port); err != nil { + if err := b.Add(port, igs); err != nil { return err } } @@ -418,14 +436,14 @@ func (b *Backends) Shutdown() error { // Status returns the status of the given backend by name. func (b *Backends) Status(name string) string { - backend, err := b.cloud.GetBackendService(name) + backend, err := b.cloud.GetGlobalBackendService(name) if err != nil || len(backend.Backends) == 0 { return "Unknown" } // TODO: Look at more than one backend's status // TODO: Include port, ip in the status, since it's in the health info.
- hs, err := b.cloud.GetHealth(name, backend.Backends[0].Group) + hs, err := b.cloud.GetGlobalBackendServiceHealth(name, backend.Backends[0].Group) if err != nil || len(hs.HealthStatus) == 0 || hs.HealthStatus[0] == nil { return "Unknown" } @@ -433,15 +451,34 @@ func (b *Backends) Status(name string) string { return hs.HealthStatus[0].HealthState } -func applyProbeSettingsToHC(p *api_v1.Probe, hc *healthchecks.HealthCheck) { +func applyLegacyHCToHC(existing *compute.HttpHealthCheck, hc *healthchecks.HealthCheck) { + hc.Description = existing.Description + hc.CheckIntervalSec = existing.CheckIntervalSec + hc.HealthyThreshold = existing.HealthyThreshold + hc.Host = existing.Host + hc.Port = existing.Port + hc.RequestPath = existing.RequestPath + hc.TimeoutSec = existing.TimeoutSec + hc.UnhealthyThreshold = existing.UnhealthyThreshold +} + +func applyProbeSettingsToHC(p *v1.Probe, hc *healthchecks.HealthCheck) { healthPath := p.Handler.HTTPGet.Path // GCE requires a leading "/" for health check urls. if !strings.HasPrefix(healthPath, "/") { healthPath = "/" + healthPath } + // Extract host from HTTP headers + host := p.Handler.HTTPGet.Host + for _, header := range p.Handler.HTTPGet.HTTPHeaders { + if header.Name == "Host" { + host = header.Value + break + } + } hc.RequestPath = healthPath - hc.Host = p.Handler.HTTPGet.Host + hc.Host = host hc.Description = "Kubernetes L7 health check generated with readiness probe settings." hc.CheckIntervalSec = int64(p.PeriodSeconds) + int64(healthchecks.DefaultHealthCheckInterval.Seconds()) hc.TimeoutSec = int64(p.TimeoutSeconds) diff --git a/controllers/gce/backends/backends_test.go b/controllers/gce/backends/backends_test.go index 99cfd45ba..8d116ac6e 100644 --- a/controllers/gce/backends/backends_test.go +++ b/controllers/gce/backends/backends_test.go @@ -23,9 +23,9 @@ import ( compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + api_v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" - api_v1 "k8s.io/client-go/pkg/api/v1" "k8s.io/ingress/controllers/gce/healthchecks" "k8s.io/ingress/controllers/gce/instances" @@ -50,21 +50,23 @@ var existingProbe = &api_v1.Probe{ }, } -func newBackendPool(f BackendServices, fakeIGs instances.InstanceGroups, syncWithCloud bool) *Backends { +func newTestJig(f BackendServices, fakeIGs instances.InstanceGroups, syncWithCloud bool) (*Backends, healthchecks.HealthCheckProvider) { namer := &utils.Namer{} nodePool := instances.NewNodePool(fakeIGs) nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}}) - healthChecks := healthchecks.NewHealthChecker(healthchecks.NewFakeHealthCheckProvider(), "/", namer) + healthCheckProvider := healthchecks.NewFakeHealthCheckProvider() + healthChecks := healthchecks.NewHealthChecker(healthCheckProvider, "/", namer) bp := NewBackendPool(f, healthChecks, nodePool, namer, []int64{}, syncWithCloud) probes := map[ServicePort]*api_v1.Probe{{Port: 443, Protocol: utils.ProtocolHTTPS}: existingProbe} bp.Init(NewFakeProbeProvider(probes)) - return bp + + return bp, healthCheckProvider } func TestBackendPoolAdd(t *testing.T) { f := NewFakeBackendServices(noOpErrFunc) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) - pool := newBackendPool(f, fakeIGs, false) + pool, _ := newTestJig(f, fakeIGs, false) namer := utils.Namer{} testCases := []ServicePort{ @@ -78,14 +80,14 @@ func TestBackendPoolAdd(t *testing.T) { // Add a backend for a port, then re-add the same port and // make sure it corrects a broken link 
from the backend to // the instance group. - err := pool.Add(nodePort) + err := pool.Add(nodePort, nil) if err != nil { t.Fatalf("Did not find expect error when adding a nodeport: %v, err: %v", nodePort, err) } beName := namer.BeName(nodePort.Port) // Check that the new backend has the right port - be, err := f.GetBackendService(beName) + be, err := f.GetGlobalBackendService(beName) if err != nil { t.Fatalf("Did not find expected backend %v", beName) } @@ -105,7 +107,6 @@ func TestBackendPoolAdd(t *testing.T) { } // Check the created healthcheck is the correct protocol - // pool.healthChecker. hc, err := pool.healthChecker.Get(nodePort.Port) if err != nil { t.Fatalf("Unexpected err when querying fake healthchecker: %v", err) @@ -122,17 +123,55 @@ func TestBackendPoolAdd(t *testing.T) { } } +func TestHealthCheckMigration(t *testing.T) { + f := NewFakeBackendServices(noOpErrFunc) + fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) + pool, hcp := newTestJig(f, fakeIGs, false) + namer := utils.Namer{} + + p := ServicePort{Port: 7000, Protocol: utils.ProtocolHTTP} + + // Create a legacy health check and insert it into the HC provider. + legacyHC := &compute.HttpHealthCheck{ + Name: namer.BeName(p.Port), + RequestPath: "/my-healthz-path", + Host: "k8s.io", + Description: "My custom HC", + UnhealthyThreshold: 30, + CheckIntervalSec: 40, + } + hcp.CreateHttpHealthCheck(legacyHC) + + // Add the service port to the backend pool + pool.Add(p, nil) + + // Assert the proper health check was created + hc, _ := pool.healthChecker.Get(p.Port) + if hc == nil || hc.Protocol() != p.Protocol { + t.Fatalf("Expected %s health check, received %v: ", p.Protocol, hc) + } + + // Assert the newer health check has the legacy health check settings + if hc.RequestPath != legacyHC.RequestPath || + hc.Host != legacyHC.Host || + hc.UnhealthyThreshold != legacyHC.UnhealthyThreshold || + hc.CheckIntervalSec != legacyHC.CheckIntervalSec || + hc.Description != legacyHC.Description { + t.Fatalf("Expected newer health check to have identical settings to legacy health check. 
Legacy: %+v, New: %+v", legacyHC, hc) + } +} + func TestBackendPoolUpdate(t *testing.T) { f := NewFakeBackendServices(noOpErrFunc) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) - pool := newBackendPool(f, fakeIGs, false) + pool, _ := newTestJig(f, fakeIGs, false) namer := utils.Namer{} p := ServicePort{Port: 3000, Protocol: utils.ProtocolHTTP} - pool.Add(p) + pool.Add(p, nil) beName := namer.BeName(p.Port) - be, err := f.GetBackendService(beName) + be, err := f.GetGlobalBackendService(beName) if err != nil { t.Fatalf("Unexpected err: %v", err) } @@ -149,9 +188,9 @@ func TestBackendPoolUpdate(t *testing.T) { // Update service port to encrypted p.Protocol = utils.ProtocolHTTPS - pool.Sync([]ServicePort{p}) + pool.Sync([]ServicePort{p}, nil) - be, err = f.GetBackendService(beName) + be, err = f.GetGlobalBackendService(beName) if err != nil { t.Fatalf("Unexpected err retrieving backend service after update: %v", err) } @@ -171,14 +210,14 @@ func TestBackendPoolUpdate(t *testing.T) { func TestBackendPoolChaosMonkey(t *testing.T) { f := NewFakeBackendServices(noOpErrFunc) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) - pool := newBackendPool(f, fakeIGs, false) + pool, _ := newTestJig(f, fakeIGs, false) namer := utils.Namer{} nodePort := ServicePort{Port: 8080, Protocol: utils.ProtocolHTTP} - pool.Add(nodePort) + pool.Add(nodePort, nil) beName := namer.BeName(nodePort.Port) - be, _ := f.GetBackendService(beName) + be, _ := f.GetGlobalBackendService(beName) // Mess up the link between backend service and instance group. // This simulates a user doing foolish things through the UI. @@ -186,15 +225,15 @@ func TestBackendPoolChaosMonkey(t *testing.T) { {Group: "test edge hop"}, } f.calls = []int{} - f.UpdateBackendService(be) + f.UpdateGlobalBackendService(be) - pool.Add(nodePort) + pool.Add(nodePort, nil) for _, call := range f.calls { if call == utils.Create { t.Fatalf("Unexpected create for existing backend service") } } - gotBackend, err := f.GetBackendService(beName) + gotBackend, err := f.GetGlobalBackendService(beName) if err != nil { t.Fatalf("Failed to find a backend with name %v: %v", beName, err) } @@ -220,10 +259,10 @@ func TestBackendPoolSync(t *testing.T) { svcNodePorts := []ServicePort{{Port: 81, Protocol: utils.ProtocolHTTP}, {Port: 82, Protocol: utils.ProtocolHTTPS}, {Port: 83, Protocol: utils.ProtocolHTTP}} f := NewFakeBackendServices(noOpErrFunc) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) - pool := newBackendPool(f, fakeIGs, true) - pool.Add(ServicePort{Port: 81}) - pool.Add(ServicePort{Port: 90}) - if err := pool.Sync(svcNodePorts); err != nil { + pool, _ := newTestJig(f, fakeIGs, true) + pool.Add(ServicePort{Port: 81}, nil) + pool.Add(ServicePort{Port: 90}, nil) + if err := pool.Sync(svcNodePorts, nil); err != nil { t.Errorf("Expected backend pool to sync, err: %v", err) } if err := pool.GC(svcNodePorts); err != nil { @@ -257,12 +296,12 @@ func TestBackendPoolSync(t *testing.T) { // k8s-be-3001--uid - another cluster tagged with uid unrelatedBackends := sets.NewString([]string{"foo", "k8s-be-foo", "k8s--bar--foo", "k8s-be-30001--uid"}...) for _, name := range unrelatedBackends.List() { - f.CreateBackendService(&compute.BackendService{Name: name}) + f.CreateGlobalBackendService(&compute.BackendService{Name: name}) } namer := &utils.Namer{} // This backend should get deleted again since it is managed by this cluster. 
- f.CreateBackendService(&compute.BackendService{Name: namer.BeName(deletedPorts[0].Port)}) + f.CreateGlobalBackendService(&compute.BackendService{Name: namer.BeName(deletedPorts[0].Port)}) // TODO: Avoid casting. // Repopulate the pool with a cloud list, which now includes the 82 port @@ -272,7 +311,7 @@ func TestBackendPoolSync(t *testing.T) { pool.GC(svcNodePorts) - currBackends, _ := f.ListBackendServices() + currBackends, _ := f.ListGlobalBackendServices() currSet := sets.NewString() for _, b := range currBackends.Items { currSet.Insert(b.Name) @@ -316,13 +355,13 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) { } // Create backend service with expected name and link to legacy health check - f.CreateBackendService(&compute.BackendService{ + f.CreateGlobalBackendService(&compute.BackendService{ Name: beName, HealthChecks: []string{hc.SelfLink}, }) // Have pool sync the above backend service - bp.Add(ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}) + bp.Add(ServicePort{Port: 80, Protocol: utils.ProtocolHTTPS}, nil) // Verify the legacy health check has been deleted _, err = hcp.GetHttpHealthCheck(beName) @@ -345,13 +384,13 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) { func TestBackendPoolShutdown(t *testing.T) { f := NewFakeBackendServices(noOpErrFunc) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) - pool := newBackendPool(f, fakeIGs, false) + pool, _ := newTestJig(f, fakeIGs, false) namer := utils.Namer{} // Add a backend-service and verify that it doesn't exist after Shutdown() - pool.Add(ServicePort{Port: 80}) + pool.Add(ServicePort{Port: 80}, nil) pool.Shutdown() - if _, err := f.GetBackendService(namer.BeName(80)); err == nil { + if _, err := f.GetGlobalBackendService(namer.BeName(80)); err == nil { t.Fatalf("%v", err) } } @@ -359,13 +398,13 @@ func TestBackendPoolShutdown(t *testing.T) { func TestBackendInstanceGroupClobbering(t *testing.T) { f := NewFakeBackendServices(noOpErrFunc) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) - pool := newBackendPool(f, fakeIGs, false) + pool, _ := newTestJig(f, fakeIGs, false) namer := utils.Namer{} // This will add the instance group k8s-ig to the instance pool - pool.Add(ServicePort{Port: 80}) + pool.Add(ServicePort{Port: 80}, nil) - be, err := f.GetBackendService(namer.BeName(80)) + be, err := f.GetGlobalBackendService(namer.BeName(80)) if err != nil { t.Fatalf("%v", err) } @@ -376,13 +415,13 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { {Group: "k8s-ig-foo"}, } be.Backends = append(be.Backends, newGroups...) 
- if err = f.UpdateBackendService(be); err != nil { + if err = f.UpdateGlobalBackendService(be); err != nil { t.Fatalf("Failed to update backend service %v", be.Name) } // Make sure repeated adds don't clobber the inserted instance group - pool.Add(ServicePort{Port: 80}) - be, err = f.GetBackendService(namer.BeName(80)) + pool.Add(ServicePort{Port: 80}, nil) + be, err = f.GetGlobalBackendService(namer.BeName(80)) if err != nil { t.Fatalf("%v", err) } @@ -405,7 +444,7 @@ func TestBackendCreateBalancingMode(t *testing.T) { f := NewFakeBackendServices(noOpErrFunc) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString()) - pool := newBackendPool(f, fakeIGs, false) + pool, _ := newTestJig(f, fakeIGs, false) namer := utils.Namer{} nodePort := ServicePort{Port: 8080} modes := []BalancingMode{Rate, Utilization} @@ -423,8 +462,8 @@ func TestBackendCreateBalancingMode(t *testing.T) { return nil } - pool.Add(nodePort) - be, err := f.GetBackendService(namer.BeName(nodePort.Port)) + pool.Add(nodePort, nil) + be, err := f.GetGlobalBackendService(namer.BeName(nodePort.Port)) if err != nil { t.Fatalf("%v", err) } diff --git a/controllers/gce/backends/fakes.go b/controllers/gce/backends/fakes.go index 4ad8d0c28..a054d8d2a 100644 --- a/controllers/gce/backends/fakes.go +++ b/controllers/gce/backends/fakes.go @@ -20,7 +20,7 @@ import ( "fmt" compute "google.golang.org/api/compute/v1" - api_v1 "k8s.io/client-go/pkg/api/v1" + api_v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/ingress/controllers/gce/utils" @@ -44,8 +44,8 @@ type FakeBackendServices struct { errFunc func(op int, be *compute.BackendService) error } -// GetBackendService fakes getting a backend service from the cloud. -func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendService, error) { +// GetGlobalBackendService fakes getting a backend service from the cloud. +func (f *FakeBackendServices) GetGlobalBackendService(name string) (*compute.BackendService, error) { f.calls = append(f.calls, utils.Get) obj, exists, err := f.backendServices.GetByKey(name) if !exists { @@ -62,8 +62,8 @@ func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendSe return nil, fmt.Errorf("backend service %v not found", name) } -// CreateBackendService fakes backend service creation. -func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) error { +// CreateGlobalBackendService fakes backend service creation. +func (f *FakeBackendServices) CreateGlobalBackendService(be *compute.BackendService) error { if f.errFunc != nil { if err := f.errFunc(utils.Create, be); err != nil { return err @@ -74,8 +74,8 @@ func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) e return f.backendServices.Update(be) } -// DeleteBackendService fakes backend service deletion. -func (f *FakeBackendServices) DeleteBackendService(name string) error { +// DeleteGlobalBackendService fakes backend service deletion. +func (f *FakeBackendServices) DeleteGlobalBackendService(name string) error { f.calls = append(f.calls, utils.Delete) svc, exists, err := f.backendServices.GetByKey(name) if !exists { @@ -87,8 +87,8 @@ func (f *FakeBackendServices) DeleteBackendService(name string) error { return f.backendServices.Delete(svc) } -// ListBackendServices fakes backend service listing. -func (f *FakeBackendServices) ListBackendServices() (*compute.BackendServiceList, error) { +// ListGlobalBackendServices fakes backend service listing. 
+func (f *FakeBackendServices) ListGlobalBackendServices() (*compute.BackendServiceList, error) { var svcs []*compute.BackendService for _, s := range f.backendServices.List() { svc := s.(*compute.BackendService) @@ -97,8 +97,8 @@ func (f *FakeBackendServices) ListBackendServices() (*compute.BackendServiceList return &compute.BackendServiceList{Items: svcs}, nil } -// UpdateBackendService fakes updating a backend service. -func (f *FakeBackendServices) UpdateBackendService(be *compute.BackendService) error { +// UpdateGlobalBackendService fakes updating a backend service. +func (f *FakeBackendServices) UpdateGlobalBackendService(be *compute.BackendService) error { if f.errFunc != nil { if err := f.errFunc(utils.Update, be); err != nil { return err @@ -108,9 +108,9 @@ func (f *FakeBackendServices) UpdateBackendService(be *compute.BackendService) e return f.backendServices.Update(be) } -// GetHealth fakes getting backend service health. -func (f *FakeBackendServices) GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { - be, err := f.GetBackendService(name) +// GetGlobalBackendServiceHealth fakes getting backend service health. +func (f *FakeBackendServices) GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { + be, err := f.GetGlobalBackendService(name) if err != nil { return nil, err } diff --git a/controllers/gce/backends/interfaces.go b/controllers/gce/backends/interfaces.go index c24fc88a3..586ceb17c 100644 --- a/controllers/gce/backends/interfaces.go +++ b/controllers/gce/backends/interfaces.go @@ -18,7 +18,7 @@ package backends import ( compute "google.golang.org/api/compute/v1" - api_v1 "k8s.io/client-go/pkg/api/v1" + api_v1 "k8s.io/api/core/v1" ) // ProbeProvider retrieves a probe struct given a nodePort @@ -30,10 +30,10 @@ type probeProvider interface { // as gce backendServices, and sync them through the BackendServices interface. type BackendPool interface { Init(p probeProvider) - Add(port ServicePort) error + Add(port ServicePort, igs []*compute.InstanceGroup) error Get(port int64) (*compute.BackendService, error) Delete(port int64) error - Sync(ports []ServicePort) error + Sync(ports []ServicePort, igs []*compute.InstanceGroup) error GC(ports []ServicePort) error Shutdown() error Status(name string) string @@ -42,10 +42,10 @@ type BackendPool interface { // BackendServices is an interface for managing gce backend services. 
type BackendServices interface { - GetBackendService(name string) (*compute.BackendService, error) - UpdateBackendService(bg *compute.BackendService) error - CreateBackendService(bg *compute.BackendService) error - DeleteBackendService(name string) error - ListBackendServices() (*compute.BackendServiceList, error) - GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) + GetGlobalBackendService(name string) (*compute.BackendService, error) + UpdateGlobalBackendService(bg *compute.BackendService) error + CreateGlobalBackendService(bg *compute.BackendService) error + DeleteGlobalBackendService(name string) error + ListGlobalBackendServices() (*compute.BackendServiceList, error) + GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) } diff --git a/controllers/gce/controller/cluster_manager.go b/controllers/gce/controller/cluster_manager.go index 2f2a0836f..e987711b9 100644 --- a/controllers/gce/controller/cluster_manager.go +++ b/controllers/gce/controller/cluster_manager.go @@ -17,14 +17,11 @@ limitations under the License. package controller import ( - "io" "net/http" - "os" - "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/cloudprovider" + compute "google.golang.org/api/compute/v1" gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/ingress/controllers/gce/backends" @@ -57,9 +54,6 @@ const ( // Names longer than this are truncated, because of GCE restrictions. nameLenLimit = 62 - - // Sleep interval to retry cloud client creation. - cloudClientRetryInterval = 10 * time.Second ) // ClusterManager manages cluster resource pools. @@ -115,41 +109,45 @@ func (c *ClusterManager) shutdown() error { } // Checkpoint performs a checkpoint with the cloud. -// - lbNames are the names of L7 loadbalancers we wish to exist. If they already +// - lbs are the single cluster L7 loadbalancers we wish to exist. If they already // exist, they should not have any broken links between say, a UrlMap and // TargetHttpProxy. // - nodeNames are the names of nodes we wish to add to all loadbalancer // instance groups. -// - nodePorts are the ports for which we require BackendServices. Each of -// these ports must also be opened on the corresponding Instance Group. +// - backendServicePorts are the ports for which we require BackendServices. +// - namedPorts are the ports which must be opened on instance groups. +// Returns the list of all instance groups corresponding to the given loadbalancers. // If in performing the checkpoint the cluster manager runs out of quota, a // googleapi 403 is returned. -func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, nodePorts []backends.ServicePort) error { +func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, backendServicePorts []backends.ServicePort, namedPorts []backends.ServicePort) ([]*compute.InstanceGroup, error) { + if len(namedPorts) != 0 { + // Add the default backend node port to the list of named ports for instance groups. + namedPorts = append(namedPorts, c.defaultBackendNodePort) + } // Multiple ingress paths can point to the same service (and hence nodePort) // but each nodePort can only have one set of cloud resources behind it. So // don't waste time double validating GCE BackendServices. 
- portMap := map[int64]backends.ServicePort{} - for _, p := range nodePorts { - portMap[p.Port] = p + namedPorts = uniq(namedPorts) + backendServicePorts = uniq(backendServicePorts) + // Create Instance Groups. + igs, err := c.EnsureInstanceGroupsAndPorts(namedPorts) + if err != nil { + return igs, err } - nodePorts = []backends.ServicePort{} - for _, sp := range portMap { - nodePorts = append(nodePorts, sp) - } - if err := c.backendPool.Sync(nodePorts); err != nil { - return err + if err := c.backendPool.Sync(backendServicePorts, igs); err != nil { + return igs, err } if err := c.instancePool.Sync(nodeNames); err != nil { - return err + return igs, err } if err := c.l7Pool.Sync(lbs); err != nil { - return err + return igs, err } // TODO: Manage default backend and its firewall rule in a centralized way. // DefaultBackend is managed in l7 pool, which doesn't understand instances, // which the firewall rule requires. - fwNodePorts := nodePorts + fwNodePorts := backendServicePorts if len(lbs) != 0 { // If there are no Ingresses, we shouldn't be allowing traffic to the // default backend. Equally importantly if the cluster gets torn down @@ -162,10 +160,27 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeName np = append(np, p.Port) } if err := c.firewallPool.Sync(np, nodeNames); err != nil { - return err + return igs, err } - return nil + return igs, nil +} + +func (c *ClusterManager) EnsureInstanceGroupsAndPorts(servicePorts []backends.ServicePort) ([]*compute.InstanceGroup, error) { + var igs []*compute.InstanceGroup + var err error + for _, p := range servicePorts { + // EnsureInstanceGroupsAndPorts always returns all the instance groups, so we can return + // the output of any call, no need to append the return from all calls. + // TODO: Ideally, we want to call CreateInstanceGroups only the first time and + // then call AddNamedPort multiple times. Need to update the interface to + // achieve this. + igs, _, err = instances.EnsureInstanceGroupsAndPorts(c.instancePool, c.ClusterNamer, p.Port) + if err != nil { + return nil, err + } + } + return igs, nil } // GC garbage collects unused resources. @@ -209,65 +224,17 @@ func (c *ClusterManager) GC(lbNames []string, nodePorts []backends.ServicePort) return nil } -func getGCEClient(config io.Reader) *gce.GCECloud { - // Creating the cloud interface involves resolving the metadata server to get - // an oauth token. If this fails, the token provider assumes it's not on GCE. - // No errors are thrown. So we need to keep retrying till it works because - // we know we're on GCE. - for { - cloudInterface, err := cloudprovider.GetCloudProvider("gce", config) - if err == nil { - cloud := cloudInterface.(*gce.GCECloud) - - // If this controller is scheduled on a node without compute/rw - // it won't be allowed to list backends. We can assume that the - // user has no need for Ingress in this case. If they grant - // permissions to the node they will have to restart the controller - // manually to re-create the client. - if _, err = cloud.ListBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) { - return cloud - } - glog.Warningf("Failed to list backend services, retrying: %v", err) - } else { - glog.Warningf("Failed to retrieve cloud interface, retrying: %v", err) - } - time.Sleep(cloudClientRetryInterval) - } -} - // NewClusterManager creates a cluster manager for shared resources. // - namer: is the namer used to tag cluster wide shared resources.
// - defaultBackendNodePort: is the node port of glbc's default backend. This is // the kubernetes Service that serves the 404 page if no urls match. // - defaultHealthCheckPath: is the default path used for L7 health checks, eg: "/healthz". func NewClusterManager( - configFilePath string, + cloud *gce.GCECloud, namer *utils.Namer, defaultBackendNodePort backends.ServicePort, defaultHealthCheckPath string) (*ClusterManager, error) { - // TODO: Make this more resilient. Currently we create the cloud client - // and pass it through to all the pools. This makes unit testing easier. - // However if the cloud client suddenly fails, we should try to re-create it - // and continue. - var cloud *gce.GCECloud - if configFilePath != "" { - glog.Infof("Reading config from path %v", configFilePath) - config, err := os.Open(configFilePath) - if err != nil { - return nil, err - } - defer config.Close() - cloud = getGCEClient(config) - glog.Infof("Successfully loaded cloudprovider using config %q", configFilePath) - } else { - // While you might be tempted to refactor so we simply assing nil to the - // config and only invoke getGCEClient once, that will not do the right - // thing because a nil check against an interface isn't true in golang. - cloud = getGCEClient(nil) - glog.Infof("Created GCE client without a config file") - } - // Names are fundamental to the cluster, the uid allocator makes sure names don't collide. cluster := ClusterManager{ClusterNamer: namer} diff --git a/controllers/gce/controller/controller.go b/controllers/gce/controller/controller.go index 31983ff81..e0a8de1fa 100644 --- a/controllers/gce/controller/controller.go +++ b/controllers/gce/controller/controller.go @@ -18,26 +18,25 @@ package controller import ( "fmt" - "net/http" "reflect" "sync" "time" "github.com/golang/glog" + apiv1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" + informerv1 "k8s.io/client-go/informers/core/v1" + informerv1beta1 "k8s.io/client-go/informers/extensions/v1beta1" "k8s.io/client-go/kubernetes" scheme "k8s.io/client-go/kubernetes/scheme" unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1" listers "k8s.io/client-go/listers/core/v1" - api_v1 "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/ingress/controllers/gce/loadbalancers" - "k8s.io/ingress/controllers/gce/utils" ) var ( @@ -55,17 +54,45 @@ var ( storeSyncPollPeriod = 5 * time.Second ) +// ControllerContext holds the informers and the stop channel shared by the controllers. +type ControllerContext struct { + IngressInformer cache.SharedIndexInformer + ServiceInformer cache.SharedIndexInformer + PodInformer cache.SharedIndexInformer + NodeInformer cache.SharedIndexInformer + // Stop is the stop channel shared among controllers + StopCh chan struct{} +} + +func NewControllerContext(kubeClient kubernetes.Interface, namespace string, resyncPeriod time.Duration) *ControllerContext { + return &ControllerContext{ + IngressInformer: informerv1beta1.NewIngressInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}), + ServiceInformer: informerv1.NewServiceInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}), + PodInformer: informerv1.NewPodInformer(kubeClient, namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}), + NodeInformer:
informerv1.NewNodeInformer(kubeClient, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}), + StopCh: make(chan struct{}), + } +} + +func (ctx *ControllerContext) Start() { + go ctx.IngressInformer.Run(ctx.StopCh) + go ctx.ServiceInformer.Run(ctx.StopCh) + go ctx.PodInformer.Run(ctx.StopCh) + go ctx.NodeInformer.Run(ctx.StopCh) +} + // LoadBalancerController watches the kubernetes api and adds/removes services // from the loadbalancer, via loadBalancerConfig. type LoadBalancerController struct { - client kubernetes.Interface - ingController cache.Controller - nodeController cache.Controller - svcController cache.Controller - podController cache.Controller - ingLister StoreToIngressLister - nodeLister StoreToNodeLister - svcLister StoreToServiceLister + client kubernetes.Interface + + ingressSynced cache.InformerSynced + serviceSynced cache.InformerSynced + podSynced cache.InformerSynced + nodeSynced cache.InformerSynced + ingLister StoreToIngressLister + nodeLister StoreToNodeLister + svcLister StoreToServiceLister // Health checks are the readiness probes of containers on pods. podLister StoreToPodLister // TODO: Watch secrets @@ -92,7 +119,7 @@ type LoadBalancerController struct { // - clusterManager: A ClusterManager capable of creating all cloud resources // required for L7 loadbalancing. // - resyncPeriod: Watchers relist from the Kubernetes API server this often. -func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) { +func NewLoadBalancerController(kubeClient kubernetes.Interface, ctx *ControllerContext, clusterManager *ClusterManager) (*LoadBalancerController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{ @@ -101,28 +128,37 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager * lbc := LoadBalancerController{ client: kubeClient, CloudClusterManager: clusterManager, - stopCh: make(chan struct{}), + stopCh: ctx.StopCh, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, - api_v1.EventSource{Component: "loadbalancer-controller"}), + apiv1.EventSource{Component: "loadbalancer-controller"}), } lbc.nodeQueue = NewTaskQueue(lbc.syncNodes) lbc.ingQueue = NewTaskQueue(lbc.sync) lbc.hasSynced = lbc.storesSynced - // Ingress watch handlers - pathHandlers := cache.ResourceEventHandlerFuncs{ + lbc.ingressSynced = ctx.IngressInformer.HasSynced + lbc.serviceSynced = ctx.ServiceInformer.HasSynced + lbc.podSynced = ctx.PodInformer.HasSynced + lbc.nodeSynced = ctx.NodeInformer.HasSynced + + lbc.ingLister.Store = ctx.IngressInformer.GetStore() + lbc.svcLister.Indexer = ctx.ServiceInformer.GetIndexer() + lbc.podLister.Indexer = ctx.PodInformer.GetIndexer() + lbc.nodeLister.Indexer = ctx.NodeInformer.GetIndexer() + // ingress event handler + ctx.IngressInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { addIng := obj.(*extensions.Ingress) - if !isGCEIngress(addIng) { + if !isGCEIngress(addIng) && !isGCEMultiClusterIngress(addIng) { glog.Infof("Ignoring add for ingress %v based on annotation %v", addIng.Name, ingressClassKey) return } - lbc.recorder.Eventf(addIng, api_v1.EventTypeNormal, "ADD", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name)) + lbc.recorder.Eventf(addIng, apiv1.EventTypeNormal, "ADD", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name)) 
lbc.ingQueue.enqueue(obj) }, DeleteFunc: func(obj interface{}) { delIng := obj.(*extensions.Ingress) - if !isGCEIngress(delIng) { + if !isGCEIngress(delIng) && !isGCEMultiClusterIngress(delIng) { glog.Infof("Ignoring delete for ingress %v based on annotation %v", delIng.Name, ingressClassKey) return } @@ -131,7 +167,7 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager * }, UpdateFunc: func(old, cur interface{}) { curIng := cur.(*extensions.Ingress) - if !isGCEIngress(curIng) { + if !isGCEIngress(curIng) && !isGCEMultiClusterIngress(curIng) { return } if !reflect.DeepEqual(old, cur) { @@ -139,13 +175,10 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager * } lbc.ingQueue.enqueue(cur) }, - } - lbc.ingLister.Store, lbc.ingController = cache.NewInformer( - cache.NewListWatchFromClient(lbc.client.Extensions().RESTClient(), "ingresses", namespace, fields.Everything()), - &extensions.Ingress{}, resyncPeriod, pathHandlers) + }) - // Service watch handlers - svcHandlers := cache.ResourceEventHandlerFuncs{ + // service event handler + ctx.ServiceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: lbc.enqueueIngressForService, UpdateFunc: func(old, cur interface{}) { if !reflect.DeepEqual(old, cur) { @@ -153,32 +186,14 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager * } }, // Ingress deletes matter, service deletes don't. - } + }) - lbc.svcLister.Indexer, lbc.svcController = cache.NewIndexerInformer( - cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "services", namespace, fields.Everything()), - &api_v1.Service{}, - resyncPeriod, - svcHandlers, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - - lbc.podLister.Indexer, lbc.podController = cache.NewIndexerInformer( - cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "pods", namespace, fields.Everything()), - &api_v1.Pod{}, - resyncPeriod, - cache.ResourceEventHandlerFuncs{}, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - - // Node watch handlers - lbc.nodeLister.Indexer, lbc.nodeController = cache.NewIndexerInformer( - cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "nodes", api_v1.NamespaceAll, fields.Everything()), - &api_v1.Node{}, - resyncPeriod, - cache.ResourceEventHandlerFuncs{}, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) + // node event handler + ctx.NodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: lbc.nodeQueue.enqueue, + DeleteFunc: lbc.nodeQueue.enqueue, + // Nodes are updated every 10s and we don't care, so no update handler. + }) lbc.tr = &GCETranslator{&lbc} lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client} @@ -189,7 +204,7 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager * // enqueueIngressForService enqueues all the Ingress' for a Service. func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) { - svc := obj.(*api_v1.Service) + svc := obj.(*apiv1.Service) ings, err := lbc.ingLister.GetServiceIngress(svc) if err != nil { glog.V(5).Infof("ignoring service %v: %v", svc.Name, err) @@ -206,10 +221,6 @@ func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) { // Run starts the loadbalancer controller. 
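Run, below, now only starts the work queues; the informers themselves are started by ControllerContext.Start and gated on HasSynced. A hedged sketch of that start-and-wait idiom (simplified, not the controller's actual wiring):

```go
package controllerdemo

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// startAndWait runs shared informers and blocks until their caches sync,
// mirroring ControllerContext.Start combined with the storesSynced checks.
func startAndWait(stopCh chan struct{}, informers ...cache.SharedIndexInformer) error {
	synced := make([]cache.InformerSynced, 0, len(informers))
	for _, inf := range informers {
		go inf.Run(stopCh)
		synced = append(synced, inf.HasSynced)
	}
	if !cache.WaitForCacheSync(stopCh, synced...) {
		return fmt.Errorf("caches did not sync before stop channel closed")
	}
	return nil
}
```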
func (lbc *LoadBalancerController) Run() { glog.Infof("Starting loadbalancer controller") - go lbc.ingController.Run(lbc.stopCh) - go lbc.nodeController.Run(lbc.stopCh) - go lbc.svcController.Run(lbc.stopCh) - go lbc.podController.Run(lbc.stopCh) go lbc.ingQueue.run(time.Second, lbc.stopCh) go lbc.nodeQueue.run(time.Second, lbc.stopCh) <-lbc.stopCh @@ -246,14 +257,14 @@ func (lbc *LoadBalancerController) storesSynced() bool { return ( // wait for pods to sync so we don't allocate a default health check when // an endpoint has a readiness probe. - lbc.podController.HasSynced() && + lbc.podSynced() && // wait for services so we don't thrash on backend creation. - lbc.svcController.HasSynced() && + lbc.serviceSynced() && // wait for nodes so we don't disconnect a backend from an instance // group just because we don't realize there are nodes in that zone. - lbc.nodeController.HasSynced() && + lbc.nodeSynced() && // Wait for ingresses as a safety measure. We don't really need this. - lbc.ingController.HasSynced()) + lbc.ingressSynced()) } // sync manages Ingress create/updates/deletes. @@ -264,13 +275,19 @@ func (lbc *LoadBalancerController) sync(key string) (err error) { } glog.V(3).Infof("Syncing %v", key) - ingresses, err := lbc.ingLister.List() + allIngresses, err := lbc.ingLister.ListAll() if err != nil { return err } - nodePorts := lbc.tr.toNodePorts(&ingresses) + gceIngresses, err := lbc.ingLister.ListGCEIngresses() + if err != nil { + return err + } + + allNodePorts := lbc.tr.toNodePorts(&allIngresses) + gceNodePorts := lbc.tr.toNodePorts(&gceIngresses) lbNames := lbc.ingLister.Store.ListKeys() - lbs, err := lbc.ListRuntimeInfo() + lbs, err := lbc.toRuntimeInfo(gceIngresses) if err != nil { return err } @@ -296,22 +313,19 @@ func (lbc *LoadBalancerController) sync(key string) (err error) { var syncError error defer func() { - if deferErr := lbc.CloudClusterManager.GC(lbNames, nodePorts); deferErr != nil { + if deferErr := lbc.CloudClusterManager.GC(lbNames, allNodePorts); deferErr != nil { err = fmt.Errorf("error during sync %v, error during GC %v", syncError, deferErr) } glog.V(3).Infof("Finished syncing %v", key) }() - // Record any errors during sync and throw a single error at the end. This // allows us to free up associated cloud resources ASAP. - if err := lbc.CloudClusterManager.Checkpoint(lbs, nodeNames, nodePorts); err != nil { + igs, err := lbc.CloudClusterManager.Checkpoint(lbs, nodeNames, gceNodePorts, allNodePorts) + if err != nil { // TODO: Implement proper backoff for the queue. eventMsg := "GCE" - if utils.IsHTTPErrorCode(err, http.StatusForbidden) { - eventMsg += " :Quota" - } if ingExists { - lbc.recorder.Eventf(obj.(*extensions.Ingress), api_v1.EventTypeWarning, eventMsg, err.Error()) + lbc.recorder.Eventf(obj.(*extensions.Ingress), apiv1.EventTypeWarning, eventMsg, err.Error()) } else { err = fmt.Errorf("%v, error: %v", eventMsg, err) } @@ -321,6 +335,22 @@ func (lbc *LoadBalancerController) sync(key string) (err error) { if !ingExists { return syncError } + ing := *obj.(*extensions.Ingress) + if isGCEMultiClusterIngress(&ing) { + // Add instance group names as annotation on the ingress. + if ing.Annotations == nil { + ing.Annotations = map[string]string{} + } + err = setInstanceGroupsAnnotation(ing.Annotations, igs) + if err != nil { + return err + } + if err := lbc.updateAnnotations(ing.Name, ing.Namespace, ing.Annotations); err != nil { + return err + } + return nil + } + // Update the UrlMap of the single loadbalancer that came through the watch. 
l7, err := lbc.CloudClusterManager.l7Pool.Get(key) if err != nil { @@ -328,14 +358,13 @@ func (lbc *LoadBalancerController) sync(key string) (err error) { return syncError } - ing := *obj.(*extensions.Ingress) if urlMap, err := lbc.tr.toURLMap(&ing); err != nil { syncError = fmt.Errorf("%v, convert to url map error %v", syncError, err) } else if err := l7.UpdateUrlMap(urlMap); err != nil { - lbc.recorder.Eventf(&ing, api_v1.EventTypeWarning, "UrlMap", err.Error()) + lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "UrlMap", err.Error()) syncError = fmt.Errorf("%v, update url map error: %v", syncError, err) } else if err := lbc.updateIngressStatus(l7, ing); err != nil { - lbc.recorder.Eventf(&ing, api_v1.EventTypeWarning, "Status", err.Error()) + lbc.recorder.Eventf(&ing, apiv1.EventTypeWarning, "Status", err.Error()) syncError = fmt.Errorf("%v, update ingress error: %v", syncError, err) } return syncError @@ -353,8 +382,8 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing return err } currIng.Status = extensions.IngressStatus{ - LoadBalancer: api_v1.LoadBalancerStatus{ - Ingress: []api_v1.LoadBalancerIngress{ + LoadBalancer: apiv1.LoadBalancerStatus{ + Ingress: []apiv1.LoadBalancerIngress{ {IP: ip}, }, }, @@ -368,17 +397,26 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing if _, err := ingClient.UpdateStatus(currIng); err != nil { return err } - lbc.recorder.Eventf(currIng, api_v1.EventTypeNormal, "CREATE", "ip: %v", ip) + lbc.recorder.Eventf(currIng, apiv1.EventTypeNormal, "CREATE", "ip: %v", ip) } } + annotations := loadbalancers.GetLBAnnotations(l7, currIng.Annotations, lbc.CloudClusterManager.backendPool) + if err := lbc.updateAnnotations(ing.Name, ing.Namespace, annotations); err != nil { + return err + } + return nil +} + +func (lbc *LoadBalancerController) updateAnnotations(name, namespace string, annotations map[string]string) error { // Update annotations through /update endpoint - currIng, err = ingClient.Get(ing.Name, metav1.GetOptions{}) + ingClient := lbc.client.Extensions().Ingresses(namespace) + currIng, err := ingClient.Get(name, metav1.GetOptions{}) if err != nil { return err } - currIng.Annotations = loadbalancers.GetLBAnnotations(l7, currIng.Annotations, lbc.CloudClusterManager.backendPool) - if !reflect.DeepEqual(ing.Annotations, currIng.Annotations) { - glog.V(3).Infof("Updating annotations of %v/%v", ing.Namespace, ing.Name) + if !reflect.DeepEqual(currIng.Annotations, annotations) { + glog.V(3).Infof("Updating annotations of %v/%v", namespace, name) + currIng.Annotations = annotations if _, err := ingClient.Update(currIng); err != nil { return err } @@ -386,12 +424,8 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing return nil } -// ListRuntimeInfo lists L7RuntimeInfo as understood by the loadbalancer module. -func (lbc *LoadBalancerController) ListRuntimeInfo() (lbs []*loadbalancers.L7RuntimeInfo, err error) { - ingList, err := lbc.ingLister.List() - if err != nil { - return lbs, err - } +// toRuntimeInfo returns L7RuntimeInfo for the given ingresses. 
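updateAnnotations above follows the usual read-compare-write pattern against the API server: fetch the live object, then skip the update call entirely when nothing changed. A runnable toy version of the idiom:

```go
package main

import (
	"fmt"
	"reflect"
)

// updateIfChanged issues the update callback only when desired differs from
// current, avoiding no-op writes that would churn watchers.
func updateIfChanged(current, desired map[string]string, update func(map[string]string) error) error {
	if reflect.DeepEqual(current, desired) {
		return nil
	}
	return update(desired)
}

func main() {
	cur := map[string]string{"a": "1"}
	err := updateIfChanged(cur, map[string]string{"a": "1"}, func(m map[string]string) error {
		fmt.Println("updating to", m)
		return nil
	})
	fmt.Println(err) // <nil>; no update was issued
}
```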
+func (lbc *LoadBalancerController) toRuntimeInfo(ingList extensions.IngressList) (lbs []*loadbalancers.L7RuntimeInfo, err error) { for _, ing := range ingList.Items { k, err := keyFunc(&ing) if err != nil { @@ -436,11 +470,11 @@ func (lbc *LoadBalancerController) syncNodes(key string) error { } func getNodeReadyPredicate() listers.NodeConditionPredicate { - return func(node *api_v1.Node) bool { + return func(node *apiv1.Node) bool { for ix := range node.Status.Conditions { condition := &node.Status.Conditions[ix] - if condition.Type == api_v1.NodeReady { - return condition.Status == api_v1.ConditionTrue + if condition.Type == apiv1.NodeReady { + return condition.Status == apiv1.ConditionTrue } } return false diff --git a/controllers/gce/controller/controller_test.go b/controllers/gce/controller/controller_test.go index 6f16fd542..b12d01f1d 100644 --- a/controllers/gce/controller/controller_test.go +++ b/controllers/gce/controller/controller_test.go @@ -24,14 +24,14 @@ import ( compute "google.golang.org/api/compute/v1" + api_v1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/api" "k8s.io/ingress/controllers/gce/firewalls" "k8s.io/ingress/controllers/gce/loadbalancers" @@ -53,7 +53,8 @@ func defaultBackendName(clusterName string) string { // newLoadBalancerController create a loadbalancer controller. func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalancerController { kubeClient := fake.NewSimpleClientset() - lb, err := NewLoadBalancerController(kubeClient, cm.ClusterManager, 1*time.Second, api_v1.NamespaceAll) + ctx := NewControllerContext(kubeClient, api_v1.NamespaceAll, 1*time.Second) + lb, err := NewLoadBalancerController(kubeClient, ctx, cm.ClusterManager) if err != nil { t.Fatalf("%v", err) } @@ -428,7 +429,7 @@ func TestLbChangeStaticIP(t *testing.T) { } ing.Annotations = map[string]string{staticIPNameKey: "testip"} - cm.fakeLbs.ReserveGlobalStaticIP("testip", "1.2.3.4") + cm.fakeLbs.ReserveGlobalAddress(&compute.Address{Name: "testip", Address: "1.2.3.4"}) // Second sync reassigns 1.2.3.4 to existing forwarding rule (by recreating it) lbc.sync(ingStoreKey) diff --git a/controllers/gce/controller/fakes.go b/controllers/gce/controller/fakes.go index 1ad6881bf..113460e12 100644 --- a/controllers/gce/controller/fakes.go +++ b/controllers/gce/controller/fakes.go @@ -65,7 +65,7 @@ func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager testDefaultBeNodePort, namer, ) - frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(namer), namer) + frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(), namer) cm := &ClusterManager{ ClusterNamer: namer, instancePool: nodePool, diff --git a/controllers/gce/controller/tls.go b/controllers/gce/controller/tls.go index 6795cc0d7..a44f16405 100644 --- a/controllers/gce/controller/tls.go +++ b/controllers/gce/controller/tls.go @@ -21,10 +21,10 @@ import ( "github.com/golang/glog" + api_v1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - api_v1 "k8s.io/client-go/pkg/api/v1" - extensions 
"k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/controllers/gce/loadbalancers" ) diff --git a/controllers/gce/controller/utils.go b/controllers/gce/controller/utils.go index fab840b59..d313097f5 100644 --- a/controllers/gce/controller/utils.go +++ b/controllers/gce/controller/utils.go @@ -26,6 +26,9 @@ import ( "github.com/golang/glog" compute "google.golang.org/api/compute/v1" + + api_v1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -34,8 +37,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" listers "k8s.io/client-go/listers/core/v1" - api_v1 "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -75,12 +76,19 @@ const ( // ingressClassKey picks a specific "class" for the Ingress. The controller // only processes Ingresses with this annotation either unset, or set // to either gceIngessClass or the empty string. - ingressClassKey = "kubernetes.io/ingress.class" - gceIngressClass = "gce" + ingressClassKey = "kubernetes.io/ingress.class" + gceIngressClass = "gce" + gceMultiIngressClass = "gce-multi-cluster" // Label key to denote which GCE zone a Kubernetes node is in. zoneKey = "failure-domain.beta.kubernetes.io/zone" defaultZone = "" + + // instanceGroupsAnnotationKey is the annotation key used by controller to + // specify the name and zone of instance groups created for the ingress. + // This is read only for users. Controller will overrite any user updates. + // This is only set for ingresses with ingressClass = "gce-multi-cluster" + instanceGroupsAnnotationKey = "ingress.gcp.kubernetes.io/instance-groups" ) // ingAnnotations represents Ingress annotations. @@ -156,6 +164,13 @@ func isGCEIngress(ing *extensions.Ingress) bool { return class == "" || class == gceIngressClass } +// isGCEMultiClusterIngress returns true if the given Ingress has +// ingress.class annotation set to "gce-multi-cluster". +func isGCEMultiClusterIngress(ing *extensions.Ingress) bool { + class := ingAnnotations(ing.ObjectMeta.Annotations).ingressClass() + return class == gceMultiIngressClass +} + // errorNodePortNotFound is an implementation of error. type errorNodePortNotFound struct { backend extensions.IngressBackend @@ -285,8 +300,19 @@ func ListAll(store cache.Store, selector labels.Selector, appendFn cache.AppendF return nil } -// List lists all Ingress' in the store. -func (s *StoreToIngressLister) List() (ing extensions.IngressList, err error) { +// List lists all Ingress' in the store (both single and multi cluster ingresses). +func (s *StoreToIngressLister) ListAll() (ing extensions.IngressList, err error) { + for _, m := range s.Store.List() { + newIng := m.(*extensions.Ingress) + if isGCEIngress(newIng) || isGCEMultiClusterIngress(newIng) { + ing.Items = append(ing.Items, *newIng) + } + } + return ing, nil +} + +// ListGCEIngresses lists all GCE Ingress' in the store. +func (s *StoreToIngressLister) ListGCEIngresses() (ing extensions.IngressList, err error) { for _, m := range s.Store.List() { newIng := m.(*extensions.Ingress) if isGCEIngress(newIng) { @@ -471,32 +497,39 @@ PortLoop: return p, nil } -// toNodePorts converts a pathlist to a flat list of nodeports. +// toNodePorts is a helper method over ingressToNodePorts to process a list of ingresses. 
func (t *GCETranslator) toNodePorts(ings *extensions.IngressList) []backends.ServicePort { var knownPorts []backends.ServicePort for _, ing := range ings.Items { - defaultBackend := ing.Spec.Backend - if defaultBackend != nil { - port, err := t.getServiceNodePort(*defaultBackend, ing.Namespace) + knownPorts = append(knownPorts, t.ingressToNodePorts(&ing)...) + } + return knownPorts +} + +// ingressToNodePorts converts a pathlist to a flat list of nodeports for the given ingress. +func (t *GCETranslator) ingressToNodePorts(ing *extensions.Ingress) []backends.ServicePort { + var knownPorts []backends.ServicePort + defaultBackend := ing.Spec.Backend + if defaultBackend != nil { + port, err := t.getServiceNodePort(*defaultBackend, ing.Namespace) + if err != nil { + glog.Infof("%v", err) + } else { + knownPorts = append(knownPorts, port) + } + } + for _, rule := range ing.Spec.Rules { + if rule.HTTP == nil { + glog.Errorf("ignoring non http Ingress rule") + continue + } + for _, path := range rule.HTTP.Paths { + port, err := t.getServiceNodePort(path.Backend, ing.Namespace) if err != nil { glog.Infof("%v", err) - } else { - knownPorts = append(knownPorts, port) - } - } - for _, rule := range ing.Spec.Rules { - if rule.HTTP == nil { - glog.Errorf("ignoring non http Ingress rule") continue } - for _, path := range rule.HTTP.Paths { - port, err := t.getServiceNodePort(path.Backend, ing.Namespace) - if err != nil { - glog.Infof("%v", err) - continue - } - knownPorts = append(knownPorts, port) - } + knownPorts = append(knownPorts, port) } } return knownPorts @@ -592,10 +625,11 @@ func (t *GCETranslator) getHTTPProbe(svc api_v1.Service, targetPort intstr.IntOr // isSimpleHTTPProbe returns true if the given Probe is: // - an HTTPGet probe, as opposed to a tcp or exec probe -// - has no special host or headers fields +// - has no special host or headers fields, except for possibly an HTTP Host header func isSimpleHTTPProbe(probe *api_v1.Probe) bool { return (probe != nil && probe.Handler.HTTPGet != nil && probe.Handler.HTTPGet.Host == "" && - len(probe.Handler.HTTPGet.HTTPHeaders) == 0) + (len(probe.Handler.HTTPGet.HTTPHeaders) == 0 || + (len(probe.Handler.HTTPGet.HTTPHeaders) == 1 && probe.Handler.HTTPGet.HTTPHeaders[0].Name == "Host"))) } // GetProbe returns a probe that's used for the given nodeport @@ -639,3 +673,34 @@ func (o PodsByCreationTimestamp) Less(i, j int) bool { } return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) } + +// setInstanceGroupsAnnotation sets the instance-groups annotation with names of the given instance groups. +func setInstanceGroupsAnnotation(existing map[string]string, igs []*compute.InstanceGroup) error { + type Value struct { + Name string + Zone string + } + var instanceGroups []Value + for _, ig := range igs { + instanceGroups = append(instanceGroups, Value{Name: ig.Name, Zone: ig.Zone}) + } + jsonValue, err := json.Marshal(instanceGroups) + if err != nil { + return err + } + existing[instanceGroupsAnnotationKey] = string(jsonValue) + return nil +} + +// uniq returns an array of unique service ports from the given array. 
+func uniq(nodePorts []backends.ServicePort) []backends.ServicePort { + portMap := map[int64]backends.ServicePort{} + for _, p := range nodePorts { + portMap[p.Port] = p + } + nodePorts = make([]backends.ServicePort, 0, len(portMap)) + for _, sp := range portMap { + nodePorts = append(nodePorts, sp) + } + return nodePorts +} diff --git a/controllers/gce/controller/util_test.go b/controllers/gce/controller/utils_test.go similarity index 82% rename from controllers/gce/controller/util_test.go rename to controllers/gce/controller/utils_test.go index c521b649d..196f3c937 100644 --- a/controllers/gce/controller/util_test.go +++ b/controllers/gce/controller/utils_test.go @@ -21,10 +21,12 @@ import ( "testing" "time" + compute "google.golang.org/api/compute/v1" + + api_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" - api_v1 "k8s.io/client-go/pkg/api/v1" "k8s.io/ingress/controllers/gce/backends" "k8s.io/ingress/controllers/gce/utils" ) @@ -76,20 +78,19 @@ func TestInstancesAddedToZones(t *testing.T) { lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"}) gotZonesToNode := cm.fakeIGs.GetInstancesByZone() - i := 0 + if cm.fakeIGs.Ports[0] != testPort { + t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports) + } + for z, nodeNames := range zoneToNode { if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil { t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err) } - if cm.fakeIGs.Ports[i] != testPort { - t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports) - } expNodes := sets.NewString(nodeNames...) gotNodes := sets.NewString(gotZonesToNode[z]...) if !gotNodes.Equal(expNodes) { t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes) } - i++ } } @@ -264,3 +265,43 @@ func addNodes(lbc *LoadBalancerController, zoneToNode map[string][]string) { func getProbePath(p *api_v1.Probe) string { return p.Handler.HTTPGet.Path } + +func TestAddInstanceGroupsAnnotation(t *testing.T) { + testCases := []struct { + Igs []*compute.InstanceGroup + ExpectedAnnotation string + }{ + { + // Single zone. + []*compute.InstanceGroup{&compute.InstanceGroup{ + Name: "ig-name", + Zone: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b", + }}, + `[{"Name":"ig-name","Zone":"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b"}]`, + }, + { + // Multiple zones. 
+ []*compute.InstanceGroup{ + &compute.InstanceGroup{ + Name: "ig-name-1", + Zone: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b", + }, + &compute.InstanceGroup{ + Name: "ig-name-2", + Zone: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a", + }, + }, + `[{"Name":"ig-name-1","Zone":"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b"},{"Name":"ig-name-2","Zone":"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a"}]`, + }, + } + for _, c := range testCases { + annotations := map[string]string{} + err := setInstanceGroupsAnnotation(annotations, c.Igs) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if annotations[instanceGroupsAnnotationKey] != c.ExpectedAnnotation { + t.Fatalf("Unexpected annotation value: %s, expected: %s", annotations[instanceGroupsAnnotationKey], c.ExpectedAnnotation) + } + } +} diff --git a/controllers/gce/examples/health_checks/health_check_app.yaml b/controllers/gce/examples/health_checks/health_check_app.yaml index 8b3e4e146..9b26cfed7 100644 --- a/controllers/gce/examples/health_checks/health_check_app.yaml +++ b/controllers/gce/examples/health_checks/health_check_app.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: echoheaders - image: gcr.io/google_containers/echoserver:1.5 + image: gcr.io/google_containers/echoserver:1.8 ports: - containerPort: 8080 readinessProbe: @@ -23,22 +23,22 @@ spec: successThreshold: 1 failureThreshold: 10 env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP --- apiVersion: v1 diff --git a/controllers/gce/examples/https/README.md b/controllers/gce/examples/https/README.md index dcbb40cde..a58a43086 100644 --- a/controllers/gce/examples/https/README.md +++ b/controllers/gce/examples/https/README.md @@ -12,7 +12,7 @@ $ kubectl --namespace=kube-system get pod -l name=glbc NAME l7-lb-controller-v0.6.0-1770t ... ``` -Also make sure you have a [firewall rule](https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/BETA_LIMITATIONS.md#creating-the-fir-glbc-health-checks) for the node port of the Service. +Also make sure you have a [firewall rule](https://github.com/kubernetes/ingress/blob/master/controllers/gce/BETA_LIMITATIONS.md#creating-the-fir-glbc-health-checks) for the node port of the Service. 
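For example, such a rule could be created along these lines (hedged: the rule name and node port 30301 are placeholders; the source ranges are the GCE L7 ranges this repository's firewall code uses):

```console
$ gcloud compute firewall-rules create allow-glbc-health-checks \
    --source-ranges 130.211.0.0/22,35.191.0.0/16 \
    --allow tcp:30301
```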
Create Ingress ```console diff --git a/controllers/gce/examples/https/make_secret.go b/controllers/gce/examples/https/make_secret.go index 8cb6ced9c..ab3537bd7 100644 --- a/controllers/gce/examples/https/make_secret.go +++ b/controllers/gce/examples/https/make_secret.go @@ -26,13 +26,13 @@ import ( "io/ioutil" "log" + api_v1 "k8s.io/api/core/v1" registered "k8s.io/apimachinery/pkg/apimachinery/registered" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" // This installs the legacy v1 API + "k8s.io/kubernetes/pkg/api" _ "k8s.io/kubernetes/pkg/api/install" ) diff --git a/controllers/gce/examples/websocket/README.md b/controllers/gce/examples/websocket/README.md index b2fe8657f..4edad6cdd 100644 --- a/controllers/gce/examples/websocket/README.md +++ b/controllers/gce/examples/websocket/README.md @@ -55,7 +55,7 @@ Wait for the loadbalancer to be created and functioning. When you receive a succ Websocket example. Connect to /ws% ``` -The binary we deployed does not have any html/javascript to demonstrate thwe websocket, so we'll use websocket.org's client. +The binary we deployed does not have any html/javascript to demonstrate the websocket, so we'll use websocket.org's client. Visit http://www.websocket.org/echo.html. It's important to use `HTTP` instead of `HTTPS` since we assembled an `HTTP` load balancer. Browsers may prevent `HTTP` websocket connections as a security feature. Set the `Location` to diff --git a/controllers/gce/firewalls/fakes.go b/controllers/gce/firewalls/fakes.go index 852e22451..8ee43ad3b 100644 --- a/controllers/gce/firewalls/fakes.go +++ b/controllers/gce/firewalls/fakes.go @@ -18,86 +18,66 @@ package firewalls import ( "fmt" - "strconv" compute "google.golang.org/api/compute/v1" - netset "k8s.io/kubernetes/pkg/util/net/sets" "k8s.io/ingress/controllers/gce/utils" ) type fakeFirewallsProvider struct { - fw map[string]*compute.Firewall - namer *utils.Namer + fw map[string]*compute.Firewall + networkUrl string } // NewFakeFirewallsProvider creates a fake for firewall rules. -func NewFakeFirewallsProvider(namer *utils.Namer) *fakeFirewallsProvider { +func NewFakeFirewallsProvider() *fakeFirewallsProvider { return &fakeFirewallsProvider{ - fw: make(map[string]*compute.Firewall), - namer: namer, + fw: make(map[string]*compute.Firewall), } } -func (f *fakeFirewallsProvider) GetFirewall(prefixedName string) (*compute.Firewall, error) { - rule, exists := f.fw[prefixedName] +func (ff *fakeFirewallsProvider) GetFirewall(name string) (*compute.Firewall, error) { + rule, exists := ff.fw[name] if exists { return rule, nil } return nil, utils.FakeGoogleAPINotFoundErr() } -func (f *fakeFirewallsProvider) CreateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error { - prefixedName := f.namer.FrName(name) - strPorts := []string{} - for _, p := range ports { - strPorts = append(strPorts, strconv.FormatInt(p, 10)) - } - if _, exists := f.fw[prefixedName]; exists { - return fmt.Errorf("firewall rule %v already exists", prefixedName) - } - - f.fw[prefixedName] = &compute.Firewall{ - // To accurately mimic the cloudprovider we need to add the k8s-fw - // prefix to the given rule name. 
- Name: prefixedName, - SourceRanges: srcRange.StringSlice(), - Allowed: []*compute.FirewallAllowed{{Ports: strPorts}}, - TargetTags: hosts, // WARNING: This is actually not correct, but good enough for testing this package +func (ff *fakeFirewallsProvider) CreateFirewall(f *compute.Firewall) error { + if _, exists := ff.fw[f.Name]; exists { + return fmt.Errorf("firewall rule %v already exists", f.Name) } + ff.fw[f.Name] = f return nil } -func (f *fakeFirewallsProvider) DeleteFirewall(name string) error { +func (ff *fakeFirewallsProvider) DeleteFirewall(name string) error { // We need the full name for the same reason as CreateFirewall. - prefixedName := f.namer.FrName(name) - _, exists := f.fw[prefixedName] + _, exists := ff.fw[name] if !exists { return utils.FakeGoogleAPINotFoundErr() } - delete(f.fw, prefixedName) + delete(ff.fw, name) return nil } -func (f *fakeFirewallsProvider) UpdateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error { - strPorts := []string{} - for _, p := range ports { - strPorts = append(strPorts, strconv.FormatInt(p, 10)) - } - +func (ff *fakeFirewallsProvider) UpdateFirewall(f *compute.Firewall) error { // We need the full name for the same reason as CreateFirewall. - prefixedName := f.namer.FrName(name) - _, exists := f.fw[prefixedName] + _, exists := ff.fw[f.Name] if !exists { - return fmt.Errorf("update failed for rule %v, srcRange %v ports %v, rule not found", prefixedName, srcRange, ports) + return fmt.Errorf("update failed for rule %v, srcRange %v ports %+v, rule not found", f.Name, f.SourceRanges, f.Allowed) } - f.fw[prefixedName] = &compute.Firewall{ - Name: name, - SourceRanges: srcRange.StringSlice(), - Allowed: []*compute.FirewallAllowed{{Ports: strPorts}}, - TargetTags: hosts, // WARNING: This is actually not correct, but good enough for testing this package - } + ff.fw[f.Name] = f return nil } + +func (ff *fakeFirewallsProvider) NetworkURL() string { + return ff.networkUrl +} + +func (ff *fakeFirewallsProvider) GetNodeTags(nodeNames []string) ([]string, error) { + return nodeNames, nil +} diff --git a/controllers/gce/firewalls/firewalls.go b/controllers/gce/firewalls/firewalls.go index 211595aad..a8aceca39 100644 --- a/controllers/gce/firewalls/firewalls.go +++ b/controllers/gce/firewalls/firewalls.go @@ -35,18 +35,18 @@ var l7SrcRanges = []string{"130.211.0.0/22", "35.191.0.0/16"} type FirewallRules struct { cloud Firewall namer *utils.Namer - srcRanges netset.IPNet + srcRanges []string } // NewFirewallPool creates a new firewall rule manager. // cloud: the cloud object implementing Firewall. // namer: cluster namer. func NewFirewallPool(cloud Firewall, namer *utils.Namer) SingleFirewallPool { - srcNetSet, err := netset.ParseIPNets(l7SrcRanges...) + _, err := netset.ParseIPNets(l7SrcRanges...) if err != nil { glog.Fatalf("Could not parse L7 src ranges %v for firewall rule: %v", l7SrcRanges, err) } - return &FirewallRules{cloud: cloud, namer: namer, srcRanges: srcNetSet} + return &FirewallRules{cloud: cloud, namer: namer, srcRanges: l7SrcRanges} } // Sync sync firewall rules with the cloud. @@ -60,9 +60,15 @@ func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error { // instead of the whole name. 
name := fr.namer.FrName(suffix) rule, _ := fr.cloud.GetFirewall(name) + + firewall, err := fr.createFirewallObject(name, "GCE L7 firewall rule", nodePorts, nodeNames) + if err != nil { + return err + } + if rule == nil { glog.Infof("Creating global l7 firewall rule %v", name) - return fr.cloud.CreateFirewall(suffix, "GCE L7 firewall rule", fr.srcRanges, nodePorts, nodeNames) + return fr.cloud.CreateFirewall(firewall) } requiredPorts := sets.NewString() @@ -85,17 +91,17 @@ func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error { glog.V(4).Info("Firewall does not need update of ports or source ranges") return nil } - glog.V(3).Infof("Firewall %v already exists, updating nodeports %v", name, nodePorts) - return fr.cloud.UpdateFirewall(suffix, "GCE L7 firewall", fr.srcRanges, nodePorts, nodeNames) + return fr.cloud.UpdateFirewall(firewall) } // Shutdown shuts down this firewall rules manager. func (fr *FirewallRules) Shutdown() error { - glog.Infof("Deleting firewall with suffix %v", fr.namer.FrSuffix()) - err := fr.cloud.DeleteFirewall(fr.namer.FrSuffix()) + name := fr.namer.FrName(fr.namer.FrSuffix()) + glog.Infof("Deleting firewall %v", name) + err := fr.cloud.DeleteFirewall(name) if err != nil && utils.IsHTTPErrorCode(err, 404) { - glog.Infof("Firewall with suffix %v didn't exist at Shutdown", fr.namer.FrSuffix()) + glog.Infof("Firewall with name %v didn't exist at Shutdown", name) return nil } return err @@ -107,3 +113,31 @@ func (fr *FirewallRules) Shutdown() error { func (fr *FirewallRules) GetFirewall(name string) (*compute.Firewall, error) { return fr.cloud.GetFirewall(name) } + +func (fr *FirewallRules) createFirewallObject(firewallName, description string, nodePorts []int64, nodeNames []string) (*compute.Firewall, error) { + ports := make([]string, len(nodePorts)) + for ix := range nodePorts { + ports[ix] = strconv.Itoa(int(nodePorts[ix])) + } + + // If the node tags to be used for this cluster have been predefined in the + // provider config, just use them. Otherwise, invoke computeHostTags method to get the tags. 
+ targetTags, err := fr.cloud.GetNodeTags(nodeNames) + if err != nil { + return nil, err + } + + return &compute.Firewall{ + Name: firewallName, + Description: description, + SourceRanges: fr.srcRanges, + Network: fr.cloud.NetworkURL(), + Allowed: []*compute.FirewallAllowed{ + { + IPProtocol: "tcp", + Ports: ports, + }, + }, + TargetTags: targetTags, + }, nil +} diff --git a/controllers/gce/firewalls/firewalls_test.go b/controllers/gce/firewalls/firewalls_test.go index ccdf1c9a3..484ad90be 100644 --- a/controllers/gce/firewalls/firewalls_test.go +++ b/controllers/gce/firewalls/firewalls_test.go @@ -22,14 +22,11 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress/controllers/gce/utils" - netset "k8s.io/kubernetes/pkg/util/net/sets" ) -const allCIDR = "0.0.0.0/0" - func TestSyncFirewallPool(t *testing.T) { namer := utils.NewNamer("ABC", "XYZ") - fwp := NewFakeFirewallsProvider(namer) + fwp := NewFakeFirewallsProvider() fp := NewFirewallPool(fwp, namer) ruleName := namer.FrName(namer.FrSuffix()) @@ -50,12 +47,16 @@ func TestSyncFirewallPool(t *testing.T) { } verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t) - srcRanges, _ := netset.ParseIPNets(allCIDR) - err = fwp.UpdateFirewall(namer.FrSuffix(), "", srcRanges, nodePorts, nodes) + firewall, err := fp.(*FirewallRules).createFirewallObject(namer.FrName(namer.FrSuffix()), "", nodePorts, nodes) + if err != nil { + t.Errorf("unexpected err when creating firewall object, err: %v", err) + } + + err = fwp.UpdateFirewall(firewall) if err != nil { t.Errorf("failed to update firewall rule, err: %v", err) } - verifyFirewallRule(fwp, ruleName, nodePorts, nodes, []string{allCIDR}, t) + verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t) // Run Sync and expect l7 src ranges to be returned err = fp.Sync(nodePorts, nodes) diff --git a/controllers/gce/firewalls/interfaces.go b/controllers/gce/firewalls/interfaces.go index 3425bb59f..97fc6a74d 100644 --- a/controllers/gce/firewalls/interfaces.go +++ b/controllers/gce/firewalls/interfaces.go @@ -18,7 +18,6 @@ package firewalls import ( compute "google.golang.org/api/compute/v1" - netset "k8s.io/kubernetes/pkg/util/net/sets" ) // SingleFirewallPool syncs the firewall rule for L7 traffic. @@ -32,8 +31,10 @@ type SingleFirewallPool interface { // This interface is a little different from the rest because it dovetails into // the same firewall methods used by the TCPLoadBalancer. 
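The Firewall interface below now accepts fully formed *compute.Firewall values, built by createFirewallObject above. A compilable sketch of constructing one (name, network URL, port, and tags are placeholder assumptions):

```go
package main

import (
	"fmt"
	"strconv"

	compute "google.golang.org/api/compute/v1"
)

// newL7Firewall builds a firewall rule the way createFirewallObject does:
// node ports become string ports on a single tcp Allowed entry.
func newL7Firewall(name, networkURL string, nodePorts []int64, tags []string) *compute.Firewall {
	ports := make([]string, len(nodePorts))
	for i, p := range nodePorts {
		ports[i] = strconv.FormatInt(p, 10)
	}
	return &compute.Firewall{
		Name:         name,
		SourceRanges: []string{"130.211.0.0/22", "35.191.0.0/16"}, // l7SrcRanges
		Network:      networkURL,
		Allowed:      []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: ports}},
		TargetTags:   tags,
	}
}

func main() {
	fw := newL7Firewall("k8s-fw-example", "global/networks/default", []int64{30301}, []string{"gke-node"})
	fmt.Println(fw.Name, fw.Allowed[0].Ports)
}
```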
type Firewall interface { - CreateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error + CreateFirewall(f *compute.Firewall) error GetFirewall(name string) (*compute.Firewall, error) DeleteFirewall(name string) error - UpdateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error + UpdateFirewall(f *compute.Firewall) error + GetNodeTags(nodeNames []string) ([]string, error) + NetworkURL() string } diff --git a/controllers/gce/healthchecks/healthchecks.go b/controllers/gce/healthchecks/healthchecks.go index 98f0a5f43..b150f2f2a 100644 --- a/controllers/gce/healthchecks/healthchecks.go +++ b/controllers/gce/healthchecks/healthchecks.go @@ -125,6 +125,12 @@ func (h *HealthChecks) Get(port int64) (*HealthCheck, error) { return NewHealthCheck(hc), err } +// GetLegacy returns legacy HTTP health checks +func (h *HealthChecks) GetLegacy(port int64) (*compute.HttpHealthCheck, error) { + name := h.namer.BeName(port) + return h.cloud.GetHttpHealthCheck(name) +} + // DeleteLegacy deletes legacy HTTP health checks func (h *HealthChecks) DeleteLegacy(port int64) error { name := h.namer.BeName(port) diff --git a/controllers/gce/healthchecks/interfaces.go b/controllers/gce/healthchecks/interfaces.go index cdf8c635d..e63f1f491 100644 --- a/controllers/gce/healthchecks/interfaces.go +++ b/controllers/gce/healthchecks/interfaces.go @@ -41,5 +41,6 @@ type HealthChecker interface { Sync(hc *HealthCheck) (string, error) Delete(port int64) error Get(port int64) (*HealthCheck, error) + GetLegacy(port int64) (*compute.HttpHealthCheck, error) DeleteLegacy(port int64) error } diff --git a/controllers/gce/ingress-app.yaml b/controllers/gce/ingress-app.yaml index 379405214..b9b3ade51 100644 --- a/controllers/gce/ingress-app.yaml +++ b/controllers/gce/ingress-app.yaml @@ -49,26 +49,26 @@ spec: spec: containers: - name: echoheaders - image: gcr.io/google_containers/echoserver:1.5 + image: gcr.io/google_containers/echoserver:1.8 ports: - containerPort: 8080 env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP --- # This is the Ingress resource that creates a HTTP Loadbalancer configured diff --git a/controllers/gce/instances/fakes.go b/controllers/gce/instances/fakes.go index 389eccf36..37d38fa81 100644 --- a/controllers/gce/instances/fakes.go +++ b/controllers/gce/instances/fakes.go @@ -18,6 +18,7 @@ package instances import ( "fmt" + "strings" compute "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -75,15 +76,16 @@ func (f *FakeInstanceGroups) GetInstanceGroup(name, zone string) (*compute.Insta return ig, nil } } - // TODO: Return googleapi 404 error - return nil, fmt.Errorf("instance group %v not found", name) + + return nil, utils.FakeGoogleAPINotFoundErr() } // CreateInstanceGroup fakes instance group creation.
-func (f *FakeInstanceGroups) CreateInstanceGroup(name, zone string) (*compute.InstanceGroup, error) { - newGroup := &compute.InstanceGroup{Name: name, SelfLink: name, Zone: zone} - f.instanceGroups = append(f.instanceGroups, newGroup) - return newGroup, nil +func (f *FakeInstanceGroups) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error { + ig.SelfLink = ig.Name + ig.Zone = zone + f.instanceGroups = append(f.instanceGroups, ig) + return nil } // DeleteInstanceGroup fakes instance group deletion. @@ -110,7 +112,8 @@ func (f *FakeInstanceGroups) ListInstancesInInstanceGroup(name, zone string, sta } // AddInstancesToInstanceGroup fakes adding instances to an instance group. -func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error { +func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error { + instanceNames := toInstanceNames(instanceRefs) f.calls = append(f.calls, utils.AddInstances) f.instances.Insert(instanceNames...) if _, ok := f.zonesToInstances[zone]; !ok { @@ -126,7 +129,8 @@ func (f *FakeInstanceGroups) GetInstancesByZone() map[string][]string { } // RemoveInstancesFromInstanceGroup fakes removing instances from an instance group. -func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceNames []string) error { +func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error { + instanceNames := toInstanceNames(instanceRefs) f.calls = append(f.calls, utils.RemoveInstances) f.instances.Delete(instanceNames...) l, ok := f.zonesToInstances[zone] @@ -145,10 +149,23 @@ func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, return nil } -// AddPortToInstanceGroup fakes adding ports to an Instance Group. -func (f *FakeInstanceGroups) AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error) { - f.Ports = append(f.Ports, port) - return &compute.NamedPort{Name: f.namer.BeName(port), Port: port}, nil +func (f *FakeInstanceGroups) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error { + found := false + for _, ig := range f.instanceGroups { + if ig.Name == igName && ig.Zone == zone { + found = true + break + } + } + if !found { + return fmt.Errorf("Failed to find instance group %q in zone %q", igName, zone) + } + + f.Ports = f.Ports[:0] + for _, port := range namedPorts { + f.Ports = append(f.Ports, port.Port) + } + return nil } // getInstanceList returns an instance list based on the given names. 
@@ -157,9 +174,7 @@ func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances instanceNames := nodeNames.List() computeInstances := []*compute.InstanceWithNamedPorts{} for _, name := range instanceNames { - instanceLink := fmt.Sprintf( - "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", - "project", "zone", name) + instanceLink := getInstanceUrl(name) computeInstances = append( computeInstances, &compute.InstanceWithNamedPorts{ Instance: instanceLink}) @@ -168,3 +183,26 @@ func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances Items: computeInstances, } } + +func (f *FakeInstanceGroups) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) { + for _, ins := range instanceNames { + instanceLink := getInstanceUrl(ins) + refs = append(refs, &compute.InstanceReference{Instance: instanceLink}) + } + return refs +} + +func getInstanceUrl(instanceName string) string { + return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", + "project", "zone", instanceName) +} + +func toInstanceNames(instanceRefs []*compute.InstanceReference) []string { + instanceNames := make([]string, len(instanceRefs)) + for ix := range instanceRefs { + url := instanceRefs[ix].Instance + parts := strings.Split(url, "/") + instanceNames[ix] = parts[len(parts)-1] + } + return instanceNames +} diff --git a/controllers/gce/instances/instances.go b/controllers/gce/instances/instances.go index b49f7cd01..fd54de034 100644 --- a/controllers/gce/instances/instances.go +++ b/controllers/gce/instances/instances.go @@ -63,29 +63,55 @@ func (i *Instances) Init(zl zoneLister) { // all of which have the exact same named port. func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) { igs := []*compute.InstanceGroup{} - namedPort := &compute.NamedPort{} + namedPort := utils.GetNamedPort(port) zones, err := i.ListZones() if err != nil { return igs, namedPort, err } + defer i.snapshotter.Add(name, struct{}{}) for _, zone := range zones { - ig, _ := i.Get(name, zone) - var err error + ig, err := i.Get(name, zone) + if err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + glog.Errorf("Failed to get instance group %v/%v, err: %v", zone, name, err) + return nil, nil, err + } + if ig == nil { glog.Infof("Creating instance group %v in zone %v", name, zone) - ig, err = i.cloud.CreateInstanceGroup(name, zone) + if err = i.cloud.CreateInstanceGroup(&compute.InstanceGroup{Name: name}, zone); err != nil { + // Error may come back with StatusConflict meaning the instance group was created by another controller + // possibly the Service Controller for internal load balancers. + if utils.IsHTTPErrorCode(err, http.StatusConflict) { + glog.Warningf("Failed to create instance group %v/%v due to conflict status, but continuing sync. 
err: %v", zone, name, err) + } else { + glog.Errorf("Failed to create instance group %v/%v, err: %v", zone, name, err) + return nil, nil, err + } + } + ig, err = i.cloud.GetInstanceGroup(name, zone) if err != nil { + glog.Errorf("Failed to get instance group %v/%v after ensuring existence, err: %v", zone, name, err) return nil, nil, err } } else { - glog.V(3).Infof("Instance group %v already exists in zone %v, adding port %d to it", name, zone, port) + glog.V(3).Infof("Instance group %v already exists in zone %v", name, zone) } - defer i.snapshotter.Add(name, struct{}{}) - namedPort, err = i.cloud.AddPortToInstanceGroup(ig, port) - if err != nil { - return nil, nil, err + + found := false + for _, np := range ig.NamedPorts { + if np.Port == port { + glog.V(3).Infof("Instance group %v already has named port %+v", ig.Name, np) + found = true + break + } + } + if !found { + glog.V(3).Infof("Instance group %v/%v does not have port %+v, adding it now.", zone, name, namedPort) + if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPort)); err != nil { + return nil, nil, err + } } igs = append(igs, ig) } @@ -103,11 +129,15 @@ func (i *Instances) DeleteInstanceGroup(name string) error { } for _, zone := range zones { if err := i.cloud.DeleteInstanceGroup(name, zone); err != nil { - if !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + if utils.IsNotFoundError(err) { + glog.V(3).Infof("Instance group %v in zone %v did not exist", name, zone) + } else if utils.IsInUsedByError(err) { + glog.V(3).Infof("Could not delete instance group %v in zone %v because it's still in use. Ignoring: %v", name, zone, err) + } else { errs = append(errs, err) } } else { - glog.Infof("Deleted instance group %v in zone %v", name, zone) + glog.V(3).Infof("Deleted instance group %v in zone %v", name, zone) } } if len(errs) == 0 { @@ -173,7 +203,7 @@ func (i *Instances) Add(groupName string, names []string) error { errs := []error{} for zone, nodeNames := range i.splitNodesByZone(names) { glog.V(1).Infof("Adding nodes %v to %v in zone %v", nodeNames, groupName, zone) - if err := i.cloud.AddInstancesToInstanceGroup(groupName, zone, nodeNames); err != nil { + if err := i.cloud.AddInstancesToInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil { errs = append(errs, err) } } @@ -187,8 +217,8 @@ func (i *Instances) Add(groupName string, names []string) error { func (i *Instances) Remove(groupName string, names []string) error { errs := []error{} for zone, nodeNames := range i.splitNodesByZone(names) { - glog.V(1).Infof("Adding nodes %v to %v in zone %v", nodeNames, groupName, zone) - if err := i.cloud.RemoveInstancesFromInstanceGroup(groupName, zone, nodeNames); err != nil { + glog.V(1).Infof("Removing nodes %v from %v in zone %v", nodeNames, groupName, zone) + if err := i.cloud.RemoveInstancesFromInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil { errs = append(errs, err) } } @@ -238,7 +268,7 @@ func (i *Instances) Sync(nodes []string) (err error) { } if len(addNodes) != 0 { - glog.V(4).Infof("Adding nodes to IG: %v", removeNodes) + glog.V(4).Infof("Adding nodes to IG: %v", addNodes) if err = i.Add(igName, addNodes); err != nil { return err } diff --git a/controllers/gce/instances/interfaces.go b/controllers/gce/instances/interfaces.go index 01c6bf352..94d3116b7 100644 --- a/controllers/gce/instances/interfaces.go +++ b/controllers/gce/instances/interfaces.go @@ -45,12 +45,13 @@ type NodePool interface { // 
InstanceGroups is an interface for managing gce instances groups, and the instances therein. type InstanceGroups interface { GetInstanceGroup(name, zone string) (*compute.InstanceGroup, error) - CreateInstanceGroup(name, zone string) (*compute.InstanceGroup, error) + CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error DeleteInstanceGroup(name, zone string) error // TODO: Refactor for modulatiry. ListInstancesInInstanceGroup(name, zone string, state string) (*compute.InstanceGroupsListInstances, error) - AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error - RemoveInstancesFromInstanceGroup(name, zone string, instanceName []string) error - AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error) + AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error + RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error + ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) + SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error } diff --git a/controllers/gce/instances/utils.go b/controllers/gce/instances/utils.go new file mode 100644 index 000000000..934311c66 --- /dev/null +++ b/controllers/gce/instances/utils.go @@ -0,0 +1,13 @@ +package instances + +import ( + compute "google.golang.org/api/compute/v1" + + "k8s.io/ingress/controllers/gce/utils" +) + +// Helper method to create instance groups. +// This method exists to ensure that we are using the same logic at all places. +func EnsureInstanceGroupsAndPorts(nodePool NodePool, namer *utils.Namer, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) { + return nodePool.AddInstanceGroup(namer.IGName(), port) +} diff --git a/controllers/gce/loadbalancers/fakes.go b/controllers/gce/loadbalancers/fakes.go index 6860b1bff..bcbfac5c9 100644 --- a/controllers/gce/loadbalancers/fakes.go +++ b/controllers/gce/loadbalancers/fakes.go @@ -114,28 +114,21 @@ func (f *FakeLoadBalancers) GetGlobalForwardingRule(name string) (*compute.Forwa } // CreateGlobalForwardingRule fakes forwarding rule creation. -func (f *FakeLoadBalancers) CreateGlobalForwardingRule(proxyLink, ip, name, portRange string) (*compute.ForwardingRule, error) { +func (f *FakeLoadBalancers) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error { f.calls = append(f.calls, "CreateGlobalForwardingRule") - if ip == "" { - ip = fmt.Sprintf(testIPManager.ip()) - } - rule := &compute.ForwardingRule{ - Name: name, - IPAddress: ip, - Target: proxyLink, - PortRange: portRange, - IPProtocol: "TCP", - SelfLink: name, + if rule.IPAddress == "" { + rule.IPAddress = fmt.Sprintf(testIPManager.ip()) } + rule.SelfLink = rule.Name f.Fw = append(f.Fw, rule) - return rule, nil + return nil } // SetProxyForGlobalForwardingRule fakes setting a global forwarding rule. -func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxyLink string) error { +func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(forwardingRuleName, proxyLink string) error { f.calls = append(f.calls, "SetProxyForGlobalForwardingRule") for i := range f.Fw { - if f.Fw[i].Name == fw.Name { + if f.Fw[i].Name == forwardingRuleName { f.Fw[i].Target = proxyLink } } @@ -181,27 +174,23 @@ func (f *FakeLoadBalancers) GetUrlMap(name string) (*compute.UrlMap, error) { } // CreateUrlMap fakes url-map creation. 
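SetNamedPortsOfInstanceGroup in the interface above replaces the old AddPortToInstanceGroup; AddInstanceGroup (earlier in instances.go) now decides client-side whether that call is needed at all. A runnable sketch of that decision:

```go
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// ensureNamedPort returns the updated port list and whether a
// SetNamedPortsOfInstanceGroup call is actually required.
func ensureNamedPort(existing []*compute.NamedPort, want *compute.NamedPort) ([]*compute.NamedPort, bool) {
	for _, np := range existing {
		if np.Port == want.Port {
			return existing, false // already present: skip the API call
		}
	}
	return append(existing, want), true
}

func main() {
	ports := []*compute.NamedPort{{Name: "port30301", Port: 30301}}
	ports, changed := ensureNamedPort(ports, &compute.NamedPort{Name: "port30880", Port: 30880})
	fmt.Println(len(ports), changed) // 2 true
}
```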
// CreateUrlMap fakes url-map creation. -func (f *FakeLoadBalancers) CreateUrlMap(backend *compute.BackendService, name string) (*compute.UrlMap, error) { +func (f *FakeLoadBalancers) CreateUrlMap(urlMap *compute.UrlMap) error { f.calls = append(f.calls, "CreateUrlMap") - urlMap := &compute.UrlMap{ - Name: name, - DefaultService: backend.SelfLink, - SelfLink: f.umName(), - } + urlMap.SelfLink = f.umName() f.Um = append(f.Um, urlMap) - return urlMap, nil + return nil } // UpdateUrlMap fakes updating url-maps. -func (f *FakeLoadBalancers) UpdateUrlMap(urlMap *compute.UrlMap) (*compute.UrlMap, error) { +func (f *FakeLoadBalancers) UpdateUrlMap(urlMap *compute.UrlMap) error { f.calls = append(f.calls, "UpdateUrlMap") for i := range f.Um { if f.Um[i].Name == urlMap.Name { f.Um[i] = urlMap - return urlMap, nil + return nil } } - return nil, nil + return fmt.Errorf("url map %v not found", urlMap.Name) } // DeleteUrlMap fakes url-map deletion. @@ -231,15 +220,11 @@ func (f *FakeLoadBalancers) GetTargetHttpProxy(name string) (*compute.TargetHttp } // CreateTargetHttpProxy fakes creating a target http proxy. -func (f *FakeLoadBalancers) CreateTargetHttpProxy(urlMap *compute.UrlMap, name string) (*compute.TargetHttpProxy, error) { +func (f *FakeLoadBalancers) CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error { f.calls = append(f.calls, "CreateTargetHttpProxy") - proxy := &compute.TargetHttpProxy{ - Name: name, - UrlMap: urlMap.SelfLink, - SelfLink: name, - } + proxy.SelfLink = proxy.Name f.Tp = append(f.Tp, proxy) - return proxy, nil + return nil } // DeleteTargetHttpProxy fakes deleting a target http proxy. @@ -280,16 +265,11 @@ func (f *FakeLoadBalancers) GetTargetHttpsProxy(name string) (*compute.TargetHtt } // CreateTargetHttpsProxy fakes creating a target https proxy. -func (f *FakeLoadBalancers) CreateTargetHttpsProxy(urlMap *compute.UrlMap, cert *compute.SslCertificate, name string) (*compute.TargetHttpsProxy, error) { +func (f *FakeLoadBalancers) CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) error { f.calls = append(f.calls, "CreateTargetHttpsProxy") - proxy := &compute.TargetHttpsProxy{ - Name: name, - UrlMap: urlMap.SelfLink, - SslCertificates: []string{cert.SelfLink}, - SelfLink: name, - } + proxy.SelfLink = proxy.Name f.Tps = append(f.Tps, proxy) - return proxy, nil + return nil } // DeleteTargetHttpsProxy fakes deleting a target https proxy. @@ -397,20 +377,16 @@ func (f *FakeLoadBalancers) CheckURLMap(t *testing.T, l7 *L7, expectedMap map[st // Static IP fakes -// ReserveGlobalStaticIP fakes out static IP reservation. -func (f *FakeLoadBalancers) ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error) { - f.calls = append(f.calls, "ReserveGlobalStaticIP") - ip := &compute.Address{ - Name: name, - Address: IPAddress, - } - f.IP = append(f.IP, ip) - return ip, nil +// ReserveGlobalAddress fakes out static IP reservation. +func (f *FakeLoadBalancers) ReserveGlobalAddress(addr *compute.Address) error { + f.calls = append(f.calls, "ReserveGlobalAddress") + f.IP = append(f.IP, addr) + return nil } -// GetGlobalStaticIP fakes out static IP retrieval. -func (f *FakeLoadBalancers) GetGlobalStaticIP(name string) (*compute.Address, error) { - f.calls = append(f.calls, "GetGlobalStaticIP") +// GetGlobalAddress fakes out static IP retrieval.
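A pattern worth calling out in these fakes: every creation method now takes the fully formed compute object and returns only an error, so callers re-read the resource to pick up server-populated fields such as `SelfLink`. A hedged sketch of the resulting get-or-create flow, written against the `LoadBalancers` interface shown later in this diff (`ensureUrlMap` itself is illustrative, not code from the PR):

```go
package loadbalancers

import compute "google.golang.org/api/compute/v1"

// ensureUrlMap returns the named UrlMap, creating it first if needed.
// Create returns only an error under the new signatures, so we re-fetch
// the object to observe fields the API server fills in (e.g. SelfLink).
func ensureUrlMap(cloud LoadBalancers, name, defaultServiceLink string) (*compute.UrlMap, error) {
	if um, err := cloud.GetUrlMap(name); err == nil && um != nil {
		return um, nil
	}
	newUrlMap := &compute.UrlMap{Name: name, DefaultService: defaultServiceLink}
	if err := cloud.CreateUrlMap(newUrlMap); err != nil {
		return nil, err
	}
	return cloud.GetUrlMap(name)
}
```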
+func (f *FakeLoadBalancers) GetGlobalAddress(name string) (*compute.Address, error) { + f.calls = append(f.calls, "GetGlobalAddress") for i := range f.IP { if f.IP[i].Name == name { return f.IP[i], nil @@ -419,9 +395,9 @@ func (f *FakeLoadBalancers) GetGlobalStaticIP(name string) (*compute.Address, er return nil, fmt.Errorf("static IP %v not found", name) } -// DeleteGlobalStaticIP fakes out static IP deletion. -func (f *FakeLoadBalancers) DeleteGlobalStaticIP(name string) error { - f.calls = append(f.calls, "DeleteGlobalStaticIP") +// DeleteGlobalAddress fakes out static IP deletion. +func (f *FakeLoadBalancers) DeleteGlobalAddress(name string) error { + f.calls = append(f.calls, "DeleteGlobalAddress") ip := []*compute.Address{} for i := range f.IP { if f.IP[i].Name != name { diff --git a/controllers/gce/loadbalancers/interfaces.go b/controllers/gce/loadbalancers/interfaces.go index 41e3ff087..de3504018 100644 --- a/controllers/gce/loadbalancers/interfaces.go +++ b/controllers/gce/loadbalancers/interfaces.go @@ -28,25 +28,25 @@ import ( type LoadBalancers interface { // Forwarding Rules GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) - CreateGlobalForwardingRule(proxyLink, ip, name, portRange string) (*compute.ForwardingRule, error) + CreateGlobalForwardingRule(rule *compute.ForwardingRule) error DeleteGlobalForwardingRule(name string) error - SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxy string) error + SetProxyForGlobalForwardingRule(fw, proxy string) error // UrlMaps GetUrlMap(name string) (*compute.UrlMap, error) - CreateUrlMap(backend *compute.BackendService, name string) (*compute.UrlMap, error) - UpdateUrlMap(urlMap *compute.UrlMap) (*compute.UrlMap, error) + CreateUrlMap(urlMap *compute.UrlMap) error + UpdateUrlMap(urlMap *compute.UrlMap) error DeleteUrlMap(name string) error // TargetProxies GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error) - CreateTargetHttpProxy(urlMap *compute.UrlMap, name string) (*compute.TargetHttpProxy, error) + CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error DeleteTargetHttpProxy(name string) error SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error // TargetHttpsProxies GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error) - CreateTargetHttpsProxy(urlMap *compute.UrlMap, SSLCerts *compute.SslCertificate, name string) (*compute.TargetHttpsProxy, error) + CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) error DeleteTargetHttpsProxy(name string) error SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, SSLCerts *compute.SslCertificate) error @@ -57,9 +57,10 @@ type LoadBalancers interface { DeleteSslCertificate(name string) error // Static IP - ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error) - GetGlobalStaticIP(name string) (*compute.Address, error) - DeleteGlobalStaticIP(name string) error + + ReserveGlobalAddress(addr *compute.Address) error + GetGlobalAddress(name string) (*compute.Address, error) + DeleteGlobalAddress(name string) error } // LoadBalancerPool is an interface to manage the cloud resources associated diff --git a/controllers/gce/loadbalancers/loadbalancers.go b/controllers/gce/loadbalancers/loadbalancers.go index c3201f99f..15ded2562 100644 --- a/controllers/gce/loadbalancers/loadbalancers.go +++ b/controllers/gce/loadbalancers/loadbalancers.go @@ -163,13 +163,13 @@ func (l *L7s) 
Delete(name string) error { // Sync loadbalancers with the given runtime info from the controller. func (l *L7s) Sync(lbs []*L7RuntimeInfo) error { - glog.V(3).Infof("Creating loadbalancers %+v", lbs) + glog.V(3).Infof("Syncing loadbalancers %v", lbs) if len(lbs) != 0 { // Lazily create a default backend so we don't tax users who don't care // about Ingress by consuming 1 of their 3 GCE BackendServices. This // BackendService is GC'd when there are no more Ingresses. - if err := l.defaultBackendPool.Add(l.defaultBackendNodePort); err != nil { + if err := l.defaultBackendPool.Add(l.defaultBackendNodePort, nil); err != nil { return err } defaultBackend, err := l.defaultBackendPool.Get(l.defaultBackendNodePort.Port) @@ -257,6 +257,11 @@ type L7RuntimeInfo struct { StaticIPName string } +// String returns the load balancer name +func (l *L7RuntimeInfo) String() string { + return l.Name +} + // L7 represents a single L7 loadbalancer. type L7 struct { Name string @@ -304,7 +309,14 @@ func (l *L7) checkUrlMap(backend *compute.BackendService) (err error) { } glog.Infof("Creating url map %v for backend %v", urlMapName, l.glbcDefaultBackend.Name) - urlMap, err = l.cloud.CreateUrlMap(l.glbcDefaultBackend, urlMapName) + newUrlMap := &compute.UrlMap{ + Name: urlMapName, + DefaultService: l.glbcDefaultBackend.SelfLink, + } + if err = l.cloud.CreateUrlMap(newUrlMap); err != nil { + return err + } + urlMap, err = l.cloud.GetUrlMap(urlMapName) if err != nil { return err } @@ -320,7 +332,14 @@ func (l *L7) checkProxy() (err error) { proxy, _ := l.cloud.GetTargetHttpProxy(proxyName) if proxy == nil { glog.Infof("Creating new http proxy for urlmap %v", l.um.Name) - proxy, err = l.cloud.CreateTargetHttpProxy(l.um, proxyName) + newProxy := &compute.TargetHttpProxy{ + Name: proxyName, + UrlMap: l.um.SelfLink, + } + if err = l.cloud.CreateTargetHttpProxy(newProxy); err != nil { + return err + } + proxy, err = l.cloud.GetTargetHttpProxy(proxyName) if err != nil { return err } @@ -488,10 +507,20 @@ func (l *L7) checkHttpsProxy() (err error) { proxy, _ := l.cloud.GetTargetHttpsProxy(proxyName) if proxy == nil { glog.Infof("Creating new https proxy for urlmap %v", l.um.Name) - proxy, err = l.cloud.CreateTargetHttpsProxy(l.um, l.sslCert, proxyName) + newProxy := &compute.TargetHttpsProxy{ + Name: proxyName, + UrlMap: l.um.SelfLink, + SslCertificates: []string{l.sslCert.SelfLink}, + } + if err = l.cloud.CreateTargetHttpsProxy(newProxy); err != nil { + return err + } + + proxy, err = l.cloud.GetTargetHttpsProxy(proxyName) if err != nil { return err } + l.tps = proxy return nil } @@ -528,7 +557,17 @@ func (l *L7) checkForwardingRule(name, proxyLink, ip, portRange string) (fw *com if fw == nil { parts := strings.Split(proxyLink, "/") glog.Infof("Creating forwarding rule for proxy %v and ip %v:%v", parts[len(parts)-1:], ip, portRange) - fw, err = l.cloud.CreateGlobalForwardingRule(proxyLink, ip, name, portRange) + rule := &compute.ForwardingRule{ + Name: name, + IPAddress: ip, + Target: proxyLink, + PortRange: portRange, + IPProtocol: "TCP", + } + if err = l.cloud.CreateGlobalForwardingRule(rule); err != nil { + return nil, err + } + fw, err = l.cloud.GetGlobalForwardingRule(name) if err != nil { return nil, err } @@ -539,7 +578,7 @@ func (l *L7) checkForwardingRule(name, proxyLink, ip, portRange string) (fw *com } else { glog.Infof("Forwarding rule %v has the wrong proxy, setting %v overwriting %v", fw.Name, fw.Target, proxyLink) - if err := l.cloud.SetProxyForGlobalForwardingRule(fw, proxyLink); err != nil { + if err := 
l.cloud.SetProxyForGlobalForwardingRule(fw.Name, proxyLink); err != nil { return nil, err } } @@ -571,7 +610,7 @@ func (l *L7) getEffectiveIP() (string, bool) { if l.runtimeInfo.StaticIPName != "" { // Existing static IPs allocated to forwarding rules will get orphaned // till the Ingress is torn down. - if ip, err := l.cloud.GetGlobalStaticIP(l.runtimeInfo.StaticIPName); err != nil || ip == nil { + if ip, err := l.cloud.GetGlobalAddress(l.runtimeInfo.StaticIPName); err != nil || ip == nil { glog.Warningf("The given static IP name %v doesn't translate to an existing global static IP, ignoring it and allocating a new IP: %v", l.runtimeInfo.StaticIPName, err) } else { @@ -624,10 +663,10 @@ func (l *L7) checkStaticIP() (err error) { return nil } staticIPName := l.namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, l.Name)) - ip, _ := l.cloud.GetGlobalStaticIP(staticIPName) + ip, _ := l.cloud.GetGlobalAddress(staticIPName) if ip == nil { glog.Infof("Creating static ip %v", staticIPName) - ip, err = l.cloud.ReserveGlobalStaticIP(staticIPName, l.fw.IPAddress) + err = l.cloud.ReserveGlobalAddress(&compute.Address{Name: staticIPName, Address: l.fw.IPAddress}) if err != nil { if utils.IsHTTPErrorCode(err, http.StatusConflict) || utils.IsHTTPErrorCode(err, http.StatusBadRequest) { @@ -637,6 +676,10 @@ func (l *L7) checkStaticIP() (err error) { } return err } + ip, err = l.cloud.GetGlobalAddress(staticIPName) + if err != nil { + return err + } } l.ip = ip return nil @@ -757,7 +800,6 @@ func (l *L7) UpdateUrlMap(ingressRules utils.GCEURLMap) error { if l.um == nil { return fmt.Errorf("cannot add url without an urlmap") } - glog.V(3).Infof("Updating urlmap for l7 %v", l.Name) // All UrlMaps must have a default backend. If the Ingress has a default // backend, it applies to all host rules as well as to the urlmap itself. @@ -807,11 +849,17 @@ func (l *L7) UpdateUrlMap(ingressRules utils.GCEURLMap) error { glog.Infof("UrlMap for l7 %v is unchanged", l.Name) return nil } - glog.Infof("Updating url map: %+v", ingressRules) - um, err := l.cloud.UpdateUrlMap(l.um) + + glog.V(3).Infof("Updating URLMap: %q", l.Name) + if err := l.cloud.UpdateUrlMap(l.um); err != nil { + return err + } + + um, err := l.cloud.GetUrlMap(l.um.Name) if err != nil { return err } + l.um = um return nil } @@ -898,7 +946,7 @@ func (l *L7) Cleanup() error { } if l.ip != nil { glog.V(2).Infof("Deleting static IP %v(%v)", l.ip.Name, l.ip.Address) - if err := utils.IgnoreHTTPNotFound(l.cloud.DeleteGlobalStaticIP(l.ip.Name)); err != nil { + if err := utils.IgnoreHTTPNotFound(l.cloud.DeleteGlobalAddress(l.ip.Name)); err != nil { return err } l.ip = nil diff --git a/controllers/gce/loadbalancers/loadbalancers_test.go b/controllers/gce/loadbalancers/loadbalancers_test.go index 7f8a0635b..dc44e12dd 100644 --- a/controllers/gce/loadbalancers/loadbalancers_test.go +++ b/controllers/gce/loadbalancers/loadbalancers_test.go @@ -289,7 +289,7 @@ func TestCreateBothLoadBalancers(t *testing.T) { if err != nil || fw.Target != tp.SelfLink { t.Fatalf("%v", err) } - ip, err := f.GetGlobalStaticIP(f.fwName(false)) + ip, err := f.GetGlobalAddress(f.fwName(false)) if err != nil || ip.Address != fw.IPAddress || ip.Address != fws.IPAddress { t.Fatalf("%v", err) } diff --git a/controllers/gce/main.go b/controllers/gce/main.go index 1804b1f27..35d9145bd 100644 --- a/controllers/gce/main.go +++ b/controllers/gce/main.go @@ -17,8 +17,11 @@ limitations under the License. 
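The `main.go` changes below thread a real GCE cloud client through the cluster manager and retry its creation. Since an `io.Reader` over the config file can only be consumed once but creation may be retried, the code buffers the bytes and hands out a fresh reader per attempt. A standalone sketch of that pattern (file contents and names are placeholders):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	// Read the config exactly once up front...
	allConfig, _ := ioutil.ReadAll(strings.NewReader("project-id = demo\n"))

	// ...then mint a fresh reader for every attempt, so one retry
	// cannot exhaust the bytes for the next.
	getConfigReader := func() io.Reader { return bytes.NewReader(allConfig) }

	for attempt := 1; attempt <= 2; attempt++ {
		b, _ := ioutil.ReadAll(getConfigReader())
		fmt.Printf("attempt %d read %d bytes\n", attempt, len(b))
	}
}
```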
package main import ( + "bytes" go_flag "flag" "fmt" + "io" + "io/ioutil" "net/http" "os" "os/signal" @@ -30,14 +33,13 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" flag "github.com/spf13/pflag" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -47,6 +49,8 @@ import ( "k8s.io/ingress/controllers/gce/loadbalancers" "k8s.io/ingress/controllers/gce/storage" "k8s.io/ingress/controllers/gce/utils" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" ) // Entrypoint of GLBC. Example invocation: @@ -69,10 +73,13 @@ const ( alphaNumericChar = "0" // Current docker image version. Only used in debug logging. - imageVersion = "glbc:0.9.4" + imageVersion = "glbc:0.9.6" // Key used to persist UIDs to configmaps. uidConfigMapName = "ingress-uid" + + // Sleep interval to retry cloud client creation. + cloudClientRetryInterval = 10 * time.Second ) var ( @@ -122,7 +129,7 @@ var ( `Path used to health-check a backend service. All Services must serve a 200 page on this path. Currently this is only configurable globally.`) - watchNamespace = flags.String("watch-namespace", api.NamespaceAll, + watchNamespace = flags.String("watch-namespace", v1.NamespaceAll, `Namespace to watch for Ingress/Services/Endpoints.`) verbose = flags.Bool("verbose", false, @@ -242,13 +249,36 @@ func main() { SvcPort: intstr.FromInt(int(port)), } + var cloud *gce.GCECloud if *inCluster || *useRealCloud { // Create cluster manager namer, err := newNamer(kubeClient, *clusterName, controller.DefaultFirewallName) if err != nil { glog.Fatalf("%v", err) } - clusterManager, err = controller.NewClusterManager(*configFilePath, namer, defaultBackendNodePort, *healthCheckPath) + + // TODO: Make this more resilient. Currently we create the cloud client + // and pass it through to all the pools. This makes unit testing easier. + // However if the cloud client suddenly fails, we should try to re-create it + // and continue. + if *configFilePath != "" { + glog.Infof("Reading config from path %v", *configFilePath) + config, err := os.Open(*configFilePath) + if err != nil { + glog.Fatalf("%v", err) + } + defer config.Close() + cloud = getGCEClient(config) + glog.Infof("Successfully loaded cloudprovider using config %q", *configFilePath) + } else { + // While you might be tempted to refactor so we simply assign nil to the + // config and only invoke getGCEClient once, that will not do the right + // thing, because a typed nil stored in an interface does not compare + // equal to nil in Go.
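That comment deserves a demonstration, because the typed-nil pitfall surprises even experienced Go programmers: an interface holding a nil concrete pointer is not itself nil, so passing a nil `*os.File` through an `io.Reader` parameter behaves differently from passing a literal nil:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	var f *os.File      // a nil pointer
	var r io.Reader = f // an interface holding (*os.File)(nil)

	// The interface value carries the concrete type, so it compares
	// unequal to nil even though the pointer inside it is nil.
	fmt.Println(f == nil) // true
	fmt.Println(r == nil) // false
}
```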
+ cloud = getGCEClient(nil) + glog.Infof("Created GCE client without a config file") + } + + clusterManager, err = controller.NewClusterManager(cloud, namer, defaultBackendNodePort, *healthCheckPath) if err != nil { glog.Fatalf("%v", err) } @@ -257,11 +287,14 @@ func main() { clusterManager = controller.NewFakeClusterManager(*clusterName, controller.DefaultFirewallName).ClusterManager } + ctx := controller.NewControllerContext(kubeClient, *watchNamespace, *resyncPeriod) + // Start loadbalancer controller - lbc, err := controller.NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod, *watchNamespace) + lbc, err := controller.NewLoadBalancerController(kubeClient, ctx, clusterManager) if err != nil { glog.Fatalf("%v", err) } + if clusterManager.ClusterNamer.GetClusterName() != "" { glog.V(3).Infof("Cluster name %+v", clusterManager.ClusterNamer.GetClusterName()) } @@ -269,6 +302,7 @@ func main() { go registerHandlers(lbc) go handleSigterm(lbc, *deleteAllOnQuit) + ctx.Start() lbc.Run() for { glog.Infof("Handled quit, awaiting pod deletion.") @@ -287,7 +321,7 @@ func newNamer(kubeClient kubernetes.Interface, clusterName string, fwName string } namer := utils.NewNamer(name, fw_name) - uidVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName) + uidVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName) // Start a goroutine to poll the cluster UID config map // We don't watch because we know exactly which configmap we want and this @@ -359,7 +393,7 @@ func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_n // Use useDefaultOrLookupVault to obtain a stored or overridden value for the firewall name. // else, use the cluster UID as a backup (this retains backwards compatibility). func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) (string, error) { - cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName) + cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName) if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil { return "", err } else if fw_name != "" { @@ -377,7 +411,7 @@ func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) // - remember that "" is the cluster uid // else, allocate a new uid func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) { - cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName) + cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName) if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil { return "", err } else if name != "" { @@ -385,7 +419,7 @@ func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) } // Check if the cluster has an Ingress with ip - ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(meta_v1.ListOptions{ + ings, err := kubeClient.Extensions().Ingresses(metav1.NamespaceAll).List(metav1.ListOptions{ LabelSelector: labels.Everything().String(), }) if err != nil { @@ -419,10 +453,10 @@ func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) // getNodePort waits for the Service, and returns its first node port.
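`getNodePort`, shown next, leans on `wait.Poll` from apimachinery: run a condition function at a fixed interval until it reports done, returns an error, or the timeout elapses. A self-contained sketch of the same pattern with a toy condition:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll once per second for up to ten seconds; returning (true, nil)
	// from the condition ends the loop successfully.
	err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
		return time.Since(start) > 3*time.Second, nil
	})
	fmt.Println("poll finished:", err) // <nil> once the condition holds
}
```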
func getNodePort(client kubernetes.Interface, ns, name string) (port, nodePort int32, err error) { - var svc *api_v1.Service + var svc *v1.Service glog.V(3).Infof("Waiting for %v/%v", ns, name) wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) { - svc, err = client.Core().Services(ns).Get(name, meta_v1.GetOptions{}) + svc, err = client.Core().Services(ns).Get(name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -438,3 +472,45 @@ func getNodePort(client kubernetes.Interface, ns, name string) (port, nodePort i }) return } + +func getGCEClient(config io.Reader) *gce.GCECloud { + getConfigReader := func() io.Reader { return nil } + + if config != nil { + allConfig, err := ioutil.ReadAll(config) + if err != nil { + glog.Fatalf("Error while reading entire config: %v", err) + } + glog.V(2).Infof("Using cloudprovider config file:\n%v ", string(allConfig)) + + getConfigReader = func() io.Reader { + return bytes.NewReader(allConfig) + } + } else { + glog.V(2).Infoln("No cloudprovider config file provided. Continuing with default values.") + } + + // Creating the cloud interface involves resolving the metadata server to get + // an oauth token. If this fails, the token provider assumes it's not on GCE. + // No errors are thrown. So we need to keep retrying till it works because + // we know we're on GCE. + for { + cloudInterface, err := cloudprovider.GetCloudProvider("gce", getConfigReader()) + if err == nil { + cloud := cloudInterface.(*gce.GCECloud) + + // If this controller is scheduled on a node without compute/rw + // it won't be allowed to list backends. We can assume that the + // user has no need for Ingress in this case. If they grant + // permissions to the node they will have to restart the controller + // manually to re-create the client. + if _, err = cloud.ListGlobalBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) { + return cloud + } + glog.Warningf("Failed to list backend services, retrying: %v", err) + } else { + glog.Warningf("Failed to retrieve cloud interface, retrying: %v", err) + } + time.Sleep(cloudClientRetryInterval) + } +} diff --git a/controllers/gce/rc.yaml b/controllers/gce/rc.yaml index e5838351f..252bc1175 100644 --- a/controllers/gce/rc.yaml +++ b/controllers/gce/rc.yaml @@ -24,18 +24,18 @@ metadata: name: l7-lb-controller labels: k8s-app: glbc - version: v0.9.4 + version: v0.9.6 spec: # There should never be more than 1 controller alive simultaneously. 
replicas: 1 selector: k8s-app: glbc - version: v0.9.4 + version: v0.9.6 template: metadata: labels: k8s-app: glbc - version: v0.9.4 + version: v0.9.6 name: glbc spec: terminationGracePeriodSeconds: 600 @@ -61,7 +61,7 @@ spec: requests: cpu: 10m memory: 20Mi - - image: gcr.io/google_containers/glbc:0.9.4 + - image: gcr.io/google_containers/glbc:0.9.6 livenessProbe: httpGet: path: /healthz diff --git a/controllers/gce/storage/configmaps.go b/controllers/gce/storage/configmaps.go index 89f1748ec..1c3089c15 100644 --- a/controllers/gce/storage/configmaps.go +++ b/controllers/gce/storage/configmaps.go @@ -23,10 +23,10 @@ import ( "github.com/golang/glog" + api_v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - api_v1 "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/tools/cache" ) diff --git a/controllers/gce/storage/configmaps_test.go b/controllers/gce/storage/configmaps_test.go index 42d6e47cb..2c40fdeae 100644 --- a/controllers/gce/storage/configmaps_test.go +++ b/controllers/gce/storage/configmaps_test.go @@ -19,7 +19,7 @@ package storage import ( "testing" - "k8s.io/client-go/pkg/api" + api "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestConfigMapUID(t *testing.T) { diff --git a/controllers/gce/utils/utils.go b/controllers/gce/utils/utils.go index 3b628ef6c..c0486f557 100644 --- a/controllers/gce/utils/utils.go +++ b/controllers/gce/utils/utils.go @@ -191,7 +191,6 @@ func (n *Namer) ParseName(name string) *NameComponents { // NameBelongsToCluster checks if a given name is tagged with this cluster's UID. func (n *Namer) NameBelongsToCluster(name string) bool { if !strings.HasPrefix(name, "k8s-") { - glog.V(4).Infof("%v not part of cluster", name) return false } parts := strings.Split(name, clusterNameDelimiter) @@ -203,7 +202,6 @@ func (n *Namer) NameBelongsToCluster(name string) bool { return false } if len(parts) > 2 { - glog.Warningf("Too many parts to name %v, ignoring", name) return false } return parts[1] == clusterName @@ -332,6 +330,20 @@ func IgnoreHTTPNotFound(err error) error { return err } +// IsInUsedByError returns true if the resource is being used by another GCP resource +func IsInUsedByError(err error) bool { + apiErr, ok := err.(*googleapi.Error) + if !ok || apiErr.Code != http.StatusBadRequest { + return false + } + return strings.Contains(apiErr.Message, "being used by") +} + +// IsNotFoundError returns true if the resource does not exist +func IsNotFoundError(err error) bool { + return IsHTTPErrorCode(err, http.StatusNotFound) +} + // CompareLinks returns true if the 2 self links are equal. func CompareLinks(l1, l2 string) bool { // TODO: These can be partial links @@ -341,3 +353,9 @@ func CompareLinks(l1, l2 string) bool { // FakeIngressRuleValueMap is a convenience type used by multiple submodules // that share the same testing methods. type FakeIngressRuleValueMap map[string]string + +// GetNamedPort creates the NamedPort API object for the given port. 
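A quick usage note for the helper introduced by the comment above and defined just below: `GetNamedPort` derives the GCE named-port name from the port number using the `port%v` format, so a node port maps deterministically to a named port:

```go
package main

import (
	"fmt"

	"k8s.io/ingress/controllers/gce/utils"
)

func main() {
	np := utils.GetNamedPort(30301)
	fmt.Printf("%s -> %d\n", np.Name, np.Port) // prints: port30301 -> 30301
}
```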
+func GetNamedPort(port int64) *compute.NamedPort { + // TODO: move port naming to namer + return &compute.NamedPort{Name: fmt.Sprintf("port%v", port), Port: port} +} diff --git a/controllers/nginx/.dockerignore b/controllers/nginx/.dockerignore new file mode 100644 index 000000000..7c09b3603 --- /dev/null +++ b/controllers/nginx/.dockerignore @@ -0,0 +1,2 @@ +core + diff --git a/controllers/nginx/Changelog.md b/controllers/nginx/Changelog.md index c39b098cc..4e01716b8 100644 --- a/controllers/nginx/Changelog.md +++ b/controllers/nginx/Changelog.md @@ -1,5 +1,286 @@ Changelog + +### 0.9-beta.13 + +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13` + +*New Features:* + +- NGINX 1.13.5 +- New flag to disable node listing +- Custom X-Forwarded-For header (CloudFlare uses `CF-Connecting-IP` as the header) +- Custom error page in Client Certificate Authentication + + +*Changes:* + +- [X] [#1272](https://github.com/kubernetes/ingress/pull/1272) Delete useless statement +- [X] [#1277](https://github.com/kubernetes/ingress/pull/1277) Add indent for nginx.conf +- [X] [#1278](https://github.com/kubernetes/ingress/pull/1278) Add proxy-pass-params annotation and Backend field +- [X] [#1282](https://github.com/kubernetes/ingress/pull/1282) Fix nginx stats +- [X] [#1288](https://github.com/kubernetes/ingress/pull/1288) Allow PATCH in enable-cors +- [X] [#1290](https://github.com/kubernetes/ingress/pull/1290) Add flag to disabling node listing +- [X] [#1293](https://github.com/kubernetes/ingress/pull/1293) Adds support for error page in Client Certificate Authentication +- [X] [#1308](https://github.com/kubernetes/ingress/pull/1308) A trivial typo in config +- [X] [#1310](https://github.com/kubernetes/ingress/pull/1310) Refactoring nginx configuration configmap +- [X] [#1311](https://github.com/kubernetes/ingress/pull/1311) Enable nginx async writes +- [X] [#1312](https://github.com/kubernetes/ingress/pull/1312) Allow custom forwarded for header +- [X] [#1313](https://github.com/kubernetes/ingress/pull/1313) Fix eol in nginx template +- [X] [#1315](https://github.com/kubernetes/ingress/pull/1315) Fix nginx custom error pages + + +*Documentation:* + +- [X] [#1270](https://github.com/kubernetes/ingress/pull/1270) add missing yamls in controllers/nginx +- [X] [#1276](https://github.com/kubernetes/ingress/pull/1276) Link rbac sample from deployment docs +- [X] [#1291](https://github.com/kubernetes/ingress/pull/1291) fix link to conformance suite +- [X] [#1295](https://github.com/kubernetes/ingress/pull/1295) fix README of nginx-ingress-controller +- [X] [#1299](https://github.com/kubernetes/ingress/pull/1299) fix two doc issues in nginx/README +- [X] [#1306](https://github.com/kubernetes/ingress/pull/1306) Fix kubeconfig example for nginx deployment + + +### 0.9-beta.12 + +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.12` + +*Breaking changes:* + +- SSL passthrough is disabled by default.
To enable the feature use `--enable-ssl-passthrough` + +*New Features:* + +- Support for arm64 +- New flags to customize listen ports +- Per minute rate limiting +- Rate limit whitelist +- Configuration of nginx worker timeout (to avoid zombie nginx workers processes) +- Redirects from non-www to www +- Custom default backend (per Ingress) +- Graceful shutdown for NGINX + +*Changes:* + +- [X] [#977](https://github.com/kubernetes/ingress/pull/977) Add sort-backends command line option +- [X] [#981](https://github.com/kubernetes/ingress/pull/981) Add annotation to allow use of service ClusterIP for NGINX upstream. +- [X] [#991](https://github.com/kubernetes/ingress/pull/991) Remove secret sync loop +- [X] [#992](https://github.com/kubernetes/ingress/pull/992) Check errors generating pem files +- [X] [#993](https://github.com/kubernetes/ingress/pull/993) Fix the sed command to work on macOS +- [X] [#1013](https://github.com/kubernetes/ingress/pull/1013) The fields of vtsDate are unified in the form of plural +- [X] [#1025](https://github.com/kubernetes/ingress/pull/1025) Fix file watch +- [X] [#1027](https://github.com/kubernetes/ingress/pull/1027) Lint code +- [X] [#1031](https://github.com/kubernetes/ingress/pull/1031) Change missing secret name log level to V(3) +- [X] [#1032](https://github.com/kubernetes/ingress/pull/1032) Alternative syncSecret approach #1030 +- [X] [#1042](https://github.com/kubernetes/ingress/pull/1042) Add function to allow custom values in Ingress status +- [X] [#1043](https://github.com/kubernetes/ingress/pull/1043) Return reference to object providing Endpoint +- [X] [#1046](https://github.com/kubernetes/ingress/pull/1046) Add field FileSHA in BasicDigest struct +- [X] [#1058](https://github.com/kubernetes/ingress/pull/1058) add per minute rate limiting +- [X] [#1060](https://github.com/kubernetes/ingress/pull/1060) Update fsnotify dependency to fix arm64 issue +- [X] [#1065](https://github.com/kubernetes/ingress/pull/1065) Add more descriptive steps in Dev Documentation +- [X] [#1073](https://github.com/kubernetes/ingress/pull/1073) Release nginx-slim 0.22 +- [X] [#1074](https://github.com/kubernetes/ingress/pull/1074) Remove lua and use fastcgi to render errors +- [X] [#1075](https://github.com/kubernetes/ingress/pull/1075) (feat/ #374) support proxy timeout +- [X] [#1076](https://github.com/kubernetes/ingress/pull/1076) Add more ssl test cases +- [X] [#1078](https://github.com/kubernetes/ingress/pull/1078) fix the same udp port and tcp port, update nginx.conf error +- [X] [#1080](https://github.com/kubernetes/ingress/pull/1080) Disable platform s390x +- [X] [#1081](https://github.com/kubernetes/ingress/pull/1081) Spit Static check and Coverage in diff Stages of Travis CI +- [X] [#1082](https://github.com/kubernetes/ingress/pull/1082) Fix build tasks +- [X] [#1087](https://github.com/kubernetes/ingress/pull/1087) Release nginx-slim 0.23 +- [X] [#1088](https://github.com/kubernetes/ingress/pull/1088) Configure nginx worker timeout +- [X] [#1089](https://github.com/kubernetes/ingress/pull/1089) Update nginx to 1.13.4 +- [X] [#1098](https://github.com/kubernetes/ingress/pull/1098) Exposing the event recorder to allow other controllers to create events +- [X] [#1102](https://github.com/kubernetes/ingress/pull/1102) Fix lose SSL Passthrough +- [X] [#1104](https://github.com/kubernetes/ingress/pull/1104) Simplify verification of hostname in ssl certificates +- [X] [#1109](https://github.com/kubernetes/ingress/pull/1109) Cleanup remote address in nginx template +- [X] 
[#1110](https://github.com/kubernetes/ingress/pull/1110) Fix Endpoint comparison +- [X] [#1118](https://github.com/kubernetes/ingress/pull/1118) feat(#733)Support nginx bandwidth control +- [X] [#1124](https://github.com/kubernetes/ingress/pull/1124) check fields len in dns.go +- [X] [#1130](https://github.com/kubernetes/ingress/pull/1130) Update nginx.go +- [X] [#1134](https://github.com/kubernetes/ingress/pull/1134) replace deprecated interface with versioned ones +- [X] [#1136](https://github.com/kubernetes/ingress/pull/1136) Fix status update - changed in #1074 +- [X] [#1138](https://github.com/kubernetes/ingress/pull/1138) update nginx.go: preformance improve +- [X] [#1139](https://github.com/kubernetes/ingress/pull/1139) Fix Todo:convert sequence to table +- [X] [#1162](https://github.com/kubernetes/ingress/pull/1162) Optimize CI build time +- [X] [#1164](https://github.com/kubernetes/ingress/pull/1164) Use variable request_uri as redirect after auth +- [X] [#1179](https://github.com/kubernetes/ingress/pull/1179) Fix sticky upstream not used when enable rewrite +- [X] [#1184](https://github.com/kubernetes/ingress/pull/1184) Add support for temporal and permanent redirects +- [X] [#1185](https://github.com/kubernetes/ingress/pull/1185) Add more info about Server-Alias usage +- [X] [#1186](https://github.com/kubernetes/ingress/pull/1186) Add annotation for client-body-buffer-size per location +- [X] [#1190](https://github.com/kubernetes/ingress/pull/1190) Add flag to disable SSL passthrough +- [X] [#1193](https://github.com/kubernetes/ingress/pull/1193) fix broken link +- [X] [#1198](https://github.com/kubernetes/ingress/pull/1198) Add option for specific scheme for base url +- [X] [#1202](https://github.com/kubernetes/ingress/pull/1202) formatIP issue +- [X] [#1203](https://github.com/kubernetes/ingress/pull/1203) NGINX not reloading correctly +- [X] [#1204](https://github.com/kubernetes/ingress/pull/1204) Fix template error +- [X] [#1205](https://github.com/kubernetes/ingress/pull/1205) Add initial sync of secrets +- [X] [#1206](https://github.com/kubernetes/ingress/pull/1206) Update ssl-passthrough docs +- [X] [#1207](https://github.com/kubernetes/ingress/pull/1207) delete broken link +- [X] [#1208](https://github.com/kubernetes/ingress/pull/1208) fix some typo +- [X] [#1210](https://github.com/kubernetes/ingress/pull/1210) add rate limit whitelist +- [X] [#1215](https://github.com/kubernetes/ingress/pull/1215) Replace base64 encoding with random uuid +- [X] [#1218](https://github.com/kubernetes/ingress/pull/1218) Trivial fixes in core/pkg/net +- [X] [#1219](https://github.com/kubernetes/ingress/pull/1219) keep zones unique per ingress resource +- [X] [#1221](https://github.com/kubernetes/ingress/pull/1221) Move certificate authentication from location to server +- [X] [#1223](https://github.com/kubernetes/ingress/pull/1223) Add doc for non-www to www annotation +- [X] [#1224](https://github.com/kubernetes/ingress/pull/1224) refactor rate limit whitelist +- [X] [#1226](https://github.com/kubernetes/ingress/pull/1226) Remove useless variable in nginx.tmpl +- [X] [#1227](https://github.com/kubernetes/ingress/pull/1227) Update annotations doc with base-url-scheme +- [X] [#1233](https://github.com/kubernetes/ingress/pull/1233) Fix ClientBodyBufferSize annotation +- [X] [#1234](https://github.com/kubernetes/ingress/pull/1234) Lint code +- [X] [#1235](https://github.com/kubernetes/ingress/pull/1235) Fix Equal comparison +- [X] [#1236](https://github.com/kubernetes/ingress/pull/1236) Add 
Validation for Client Body Buffer Size +- [X] [#1238](https://github.com/kubernetes/ingress/pull/1238) Add support for 'client_body_timeout' and 'client_header_timeout' +- [X] [#1239](https://github.com/kubernetes/ingress/pull/1239) Add flags to customize listen ports and detect port collisions +- [X] [#1243](https://github.com/kubernetes/ingress/pull/1243) Add support for access-log-path and error-log-path +- [X] [#1244](https://github.com/kubernetes/ingress/pull/1244) Add custom default backend annotation +- [X] [#1246](https://github.com/kubernetes/ingress/pull/1246) Add additional headers when custom default backend is used +- [X] [#1247](https://github.com/kubernetes/ingress/pull/1247) Make Ingress annotations available in template +- [X] [#1248](https://github.com/kubernetes/ingress/pull/1248) Improve nginx controller performance +- [X] [#1254](https://github.com/kubernetes/ingress/pull/1254) fix Type transform panic +- [X] [#1257](https://github.com/kubernetes/ingress/pull/1257) Graceful shutdown for Nginx +- [X] [#1261](https://github.com/kubernetes/ingress/pull/1261) Add support for 'worker-shutdown-timeout' + + +*Documentation:* + +- [X] [#976](https://github.com/kubernetes/ingress/pull/976) Update annotations doc +- [X] [#979](https://github.com/kubernetes/ingress/pull/979) Missing auth example +- [X] [#980](https://github.com/kubernetes/ingress/pull/980) Add nginx basic auth example +- [X] [#1001](https://github.com/kubernetes/ingress/pull/1001) examples/nginx/rbac: Give access to own namespace +- [X] [#1005](https://github.com/kubernetes/ingress/pull/1005) Update configuration.md +- [X] [#1018](https://github.com/kubernetes/ingress/pull/1018) add docs for `proxy-set-headers` and `add-headers` +- [X] [#1038](https://github.com/kubernetes/ingress/pull/1038) typo / spelling in README.md +- [X] [#1039](https://github.com/kubernetes/ingress/pull/1039) typo in examples/tcp/nginx/README.md +- [X] [#1049](https://github.com/kubernetes/ingress/pull/1049) Fix config name in the example. 
+- [X] [#1054](https://github.com/kubernetes/ingress/pull/1054) Fix link to UDP example +- [X] [#1084](https://github.com/kubernetes/ingress/pull/1084) (issue #310)Fix some broken link +- [X] [#1103](https://github.com/kubernetes/ingress/pull/1103) Add GoDoc Widget +- [X] [#1105](https://github.com/kubernetes/ingress/pull/1105) Make Readme file more readable +- [X] [#1106](https://github.com/kubernetes/ingress/pull/1106) Update annotations.md +- [X] [#1107](https://github.com/kubernetes/ingress/pull/1107) Fix Broken Link +- [X] [#1119](https://github.com/kubernetes/ingress/pull/1119) fix typos in controllers/nginx/README.md +- [X] [#1122](https://github.com/kubernetes/ingress/pull/1122) Fix broken link +- [X] [#1131](https://github.com/kubernetes/ingress/pull/1131) Add short help doc in configuration for nginx limit rate +- [X] [#1143](https://github.com/kubernetes/ingress/pull/1143) Minor Typo Fix +- [X] [#1144](https://github.com/kubernetes/ingress/pull/1144) Minor Typo fix +- [X] [#1145](https://github.com/kubernetes/ingress/pull/1145) Minor Typo fix +- [X] [#1146](https://github.com/kubernetes/ingress/pull/1146) Fix Minor Typo in Readme +- [X] [#1147](https://github.com/kubernetes/ingress/pull/1147) Minor Typo Fix +- [X] [#1148](https://github.com/kubernetes/ingress/pull/1148) Minor Typo Fix in Getting-Started.md +- [X] [#1149](https://github.com/kubernetes/ingress/pull/1149) Fix Minor Typo in TLS authentication +- [X] [#1150](https://github.com/kubernetes/ingress/pull/1150) Fix Minor Typo in Customize the HAProxy configuration +- [X] [#1151](https://github.com/kubernetes/ingress/pull/1151) Fix Minor Typo in customization custom-template +- [X] [#1152](https://github.com/kubernetes/ingress/pull/1152) Fix minor typo in HAProxy Multi TLS certificate termination +- [X] [#1153](https://github.com/kubernetes/ingress/pull/1153) Fix minor typo in Multi TLS certificate termination +- [X] [#1154](https://github.com/kubernetes/ingress/pull/1154) Fix minor typo in Role Based Access Control +- [X] [#1155](https://github.com/kubernetes/ingress/pull/1155) Fix minor typo in TCP loadbalancing +- [X] [#1156](https://github.com/kubernetes/ingress/pull/1156) Fix minor typo in UDP loadbalancing +- [X] [#1157](https://github.com/kubernetes/ingress/pull/1157) Fix minor typos in Prerequisites +- [X] [#1158](https://github.com/kubernetes/ingress/pull/1158) Fix minor typo in Ingress examples +- [X] [#1159](https://github.com/kubernetes/ingress/pull/1159) Fix minor typos in Ingress admin guide +- [X] [#1160](https://github.com/kubernetes/ingress/pull/1160) Fix a broken href and typo in Ingress FAQ +- [X] [#1165](https://github.com/kubernetes/ingress/pull/1165) Update CONTRIBUTING.md +- [X] [#1168](https://github.com/kubernetes/ingress/pull/1168) finx link to running-locally.md +- [X] [#1170](https://github.com/kubernetes/ingress/pull/1170) Update dead link in nginx/HTTPS section +- [X] [#1172](https://github.com/kubernetes/ingress/pull/1172) Update README.md +- [X] [#1173](https://github.com/kubernetes/ingress/pull/1173) Update admin.md +- [X] [#1174](https://github.com/kubernetes/ingress/pull/1174) fix several titles +- [X] [#1177](https://github.com/kubernetes/ingress/pull/1177) fix typos +- [X] [#1188](https://github.com/kubernetes/ingress/pull/1188) Fix minor typo +- [X] [#1189](https://github.com/kubernetes/ingress/pull/1189) Fix sign in URL redirect parameter +- [X] [#1192](https://github.com/kubernetes/ingress/pull/1192) Update README.md +- [X] [#1195](https://github.com/kubernetes/ingress/pull/1195) 
Update troubleshooting.md +- [X] [#1196](https://github.com/kubernetes/ingress/pull/1196) Update README.md +- [X] [#1209](https://github.com/kubernetes/ingress/pull/1209) Update README.md +- [X] [#1085](https://github.com/kubernetes/ingress/pull/1085) Fix ConfigMap's namespace in custom configuration example for nginx +- [X] [#1142](https://github.com/kubernetes/ingress/pull/1142) Fix typo in multiple docs +- [X] [#1228](https://github.com/kubernetes/ingress/pull/1228) Update release doc in getting-started.md +- [X] [#1230](https://github.com/kubernetes/ingress/pull/1230) Update godep guide link + + +### 0.9-beta.11 + +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.11` + +Fixes NGINX [CVE-2017-7529](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-7529) + +*Changes:* + +- [X] [#659](https://github.com/kubernetes/ingress/pull/659) [nginx] TCP configmap should allow listen proxy_protocol per service +- [X] [#730](https://github.com/kubernetes/ingress/pull/730) Add support for add_headers +- [X] [#808](https://github.com/kubernetes/ingress/pull/808) HTTP->HTTPS redirect does not work with use-proxy-protocol: "true" +- [X] [#921](https://github.com/kubernetes/ingress/pull/921) Make proxy-real-ip-cidr a comma separated list +- [X] [#930](https://github.com/kubernetes/ingress/pull/930) Add support for proxy protocol in TCP services +- [X] [#933](https://github.com/kubernetes/ingress/pull/933) Lint code +- [X] [#937](https://github.com/kubernetes/ingress/pull/937) Fix lint code errors +- [X] [#940](https://github.com/kubernetes/ingress/pull/940) Sets parameters for a shared memory zone of limit_conn_zone +- [X] [#949](https://github.com/kubernetes/ingress/pull/949) fix nginx version to 1.13.3 to fix integer overflow +- [X] [#956](https://github.com/kubernetes/ingress/pull/956) Simplify handling of ssl certificates +- [X] [#958](https://github.com/kubernetes/ingress/pull/958) Release ubuntu-slim:0.13 +- [X] [#959](https://github.com/kubernetes/ingress/pull/959) Release nginx-slim 0.21 +- [X] [#960](https://github.com/kubernetes/ingress/pull/960) Update nginx in ingress controller +- [X] [#964](https://github.com/kubernetes/ingress/pull/964) Support for proxy_headers_hash_bucket_size and proxy_headers_hash_max_size +- [X] [#966](https://github.com/kubernetes/ingress/pull/966) Fix error checking for pod name & NS +- [X] [#967](https://github.com/kubernetes/ingress/pull/967) Fix runningAddresses typo +- [X] [#968](https://github.com/kubernetes/ingress/pull/968) Fix missing hyphen in yaml for nginx RBAC example +- [X] [#973](https://github.com/kubernetes/ingress/pull/973) check number of servers in configuration comparator + + +### 0.9-beta.10 + +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.10` + +Fix release 0.9-beta.9 + +### 0.9-beta.9 + +**Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.9` + +*New Features:* + +- Add support for arm and ppc64le + + +*Changes:* + +- [X] [#548](https://github.com/kubernetes/ingress/pull/548) nginx: support multidomain certificates +- [X] [#620](https://github.com/kubernetes/ingress/pull/620) [nginx] Listening ports are not configurable, so ingress can't be run multiple times per node when using CNI +- [X] [#648](https://github.com/kubernetes/ingress/pull/648) publish-service argument isn't honored when ELB is internal only facing. 
+- [X] [#833](https://github.com/kubernetes/ingress/pull/833) WIP: Avoid reloads implementing Equals in structs +- [X] [#838](https://github.com/kubernetes/ingress/pull/838) Feature request: Add ingress annotation to enable upstream "keepalive" option +- [X] [#844](https://github.com/kubernetes/ingress/pull/844) ingress annotations affinity is not working +- [X] [#862](https://github.com/kubernetes/ingress/pull/862) Avoid reloads implementing Equaler interface +- [X] [#864](https://github.com/kubernetes/ingress/pull/864) Remove dead code +- [X] [#868](https://github.com/kubernetes/ingress/pull/868) Lint nginx code +- [X] [#871](https://github.com/kubernetes/ingress/pull/871) Add feature to allow sticky sessions per location +- [X] [#873](https://github.com/kubernetes/ingress/pull/873) Update README.md +- [X] [#876](https://github.com/kubernetes/ingress/pull/876) Add information about nginx controller flags +- [X] [#878](https://github.com/kubernetes/ingress/pull/878) Update go to 1.8.3 +- [X] [#881](https://github.com/kubernetes/ingress/pull/881) Option to not remove loadBalancer status record? +- [X] [#882](https://github.com/kubernetes/ingress/pull/882) Add flag to skip the update of Ingress status on shutdown +- [X] [#885](https://github.com/kubernetes/ingress/pull/885) Don't use $proxy_protocol var which may be undefined. +- [X] [#886](https://github.com/kubernetes/ingress/pull/886) Add support for SubjectAltName in SSL certificates +- [X] [#888](https://github.com/kubernetes/ingress/pull/888) Update nginx-slim to 0.19 +- [X] [#889](https://github.com/kubernetes/ingress/pull/889) Add PHOST to backend +- [X] [#890](https://github.com/kubernetes/ingress/pull/890) Improve variable configuration for source IP address +- [X] [#892](https://github.com/kubernetes/ingress/pull/892) Add upstream keepalive connections cache +- [X] [#897](https://github.com/kubernetes/ingress/pull/897) Update outdated ingress resource link +- [X] [#898](https://github.com/kubernetes/ingress/pull/898) add error check right when reload nginx fail +- [X] [#899](https://github.com/kubernetes/ingress/pull/899) Fix nginx error check +- [X] [#900](https://github.com/kubernetes/ingress/pull/900) After #862 changes in the configmap do not trigger a reload +- [X] [#901](https://github.com/kubernetes/ingress/pull/901) [doc] Update NGinX status port to 18080 +- [X] [#902](https://github.com/kubernetes/ingress/pull/902) Always reload after a change in the configuration +- [X] [#904](https://github.com/kubernetes/ingress/pull/904) Fix nginx sticky sessions +- [X] [#906](https://github.com/kubernetes/ingress/pull/906) Fix race condition with closed channels +- [X] [#907](https://github.com/kubernetes/ingress/pull/907) nginx/proxy: allow specifying next upstream behaviour +- [X] [#910](https://github.com/kubernetes/ingress/pull/910) Feature request: use `X-Forwarded-Host` from the reverse proxy before +- [X] [#911](https://github.com/kubernetes/ingress/pull/911) Improve X-Forwarded-Host support +- [X] [#915](https://github.com/kubernetes/ingress/pull/915) Release nginx-slim 0.20 +- [X] [#916](https://github.com/kubernetes/ingress/pull/916) Add arm and ppc64le support +- [X] [#919](https://github.com/kubernetes/ingress/pull/919) Apply the 'ssl-redirect' annotation per-location +- [X] [#922](https://github.com/kubernetes/ingress/pull/922) Add example of TLS termination using a classic ELB + ### 0.9-beta.8 **Image:** `gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8` @@ -29,7 +310,7 @@ Changelog - [X] 
[#829](https://github.com/kubernetes/ingress/pull/829) feat(template): wrap IPv6 addresses in [] - [X] [#786](https://github.com/kubernetes/ingress/pull/786) Update echoserver image version in examples - [X] [#825](https://github.com/kubernetes/ingress/pull/825) Create or delete ingress based on class annotation -- [X] [#790](https://github.com/kubernetes/ingress/pull/790) #789 removing duplicate X-Real-IP header +- [X] [#790](https://github.com/kubernetes/ingress/pull/790) #789 removing duplicate X-Real-IP header - [X] [#792](https://github.com/kubernetes/ingress/pull/792) Avoid checking if the controllers are synced - [X] [#798](https://github.com/kubernetes/ingress/pull/798) nginx: RBAC for leader election - [X] [#799](https://github.com/kubernetes/ingress/pull/799) could not build variables_hash @@ -52,7 +333,7 @@ Changelog *Changes:* -- [X] [#777](https://github.com/kubernetes/ingress/pull/777) Update sniff parser to fix index out of bound error +- [X] [#777](https://github.com/kubernetes/ingress/pull/777) Update sniff parser to fix index out of bound error ### 0.9-beta.6 @@ -109,8 +390,8 @@ Changelog - [X] [#663](https://github.com/kubernetes/ingress/pull/663) Remove helper required in go < 1.8 - [X] [#662](https://github.com/kubernetes/ingress/pull/662) Add debug information about ingress class -- [X] [#661](https://github.com/kubernetes/ingress/pull/661) Avoid running nginx if the configuration file is empty -- [X] [#660](https://github.com/kubernetes/ingress/pull/660) Rollback queue refactoring +- [X] [#661](https://github.com/kubernetes/ingress/pull/661) Avoid running nginx if the configuration file is empty +- [X] [#660](https://github.com/kubernetes/ingress/pull/660) Rollback queue refactoring - [X] [#654](https://github.com/kubernetes/ingress/pull/654) Update go version to 1.8 @@ -140,13 +421,13 @@ Changelog - [X] [#595](https://github.com/kubernetes/ingress/pull/595) Remove Host header from auth_request proxy configuration - [X] [#588](https://github.com/kubernetes/ingress/pull/588) Read resolv.conf file just once - [X] [#586](https://github.com/kubernetes/ingress/pull/586) Updated instructions to create an ingress controller build -- [X] [#583](https://github.com/kubernetes/ingress/pull/583) fixed lua_package_path in nginx.tmpl +- [X] [#583](https://github.com/kubernetes/ingress/pull/583) fixed lua_package_path in nginx.tmpl - [X] [#580](https://github.com/kubernetes/ingress/pull/580) Updated faq for running multiple ingress controller - [X] [#579](https://github.com/kubernetes/ingress/pull/579) Detect if the ingress controller is running with multiple replicas - [X] [#578](https://github.com/kubernetes/ingress/pull/578) Set different listeners per protocol version - [X] [#577](https://github.com/kubernetes/ingress/pull/577) Avoid zombie child processes - [X] [#576](https://github.com/kubernetes/ingress/pull/576) Replace secret workqueue -- [X] [#568](https://github.com/kubernetes/ingress/pull/568) Revert merge annotations to the implicit root context +- [X] [#568](https://github.com/kubernetes/ingress/pull/568) Revert merge annotations to the implicit root context - [X] [#563](https://github.com/kubernetes/ingress/pull/563) Add option to disable hsts preload - [X] [#560](https://github.com/kubernetes/ingress/pull/560) Fix intermittent misconfiguration of backend.secure and SessionAffinity - [X] [#556](https://github.com/kubernetes/ingress/pull/556) Update nginx version and remove dumb-init @@ -170,7 +451,7 @@ Changelog - [X] 
[#512](https://github.com/kubernetes/ingress/pull/512) Fix typos regarding the ssl-passthrough annotation documentation - [X] [#505](https://github.com/kubernetes/ingress/pull/505) add unit test cases for core/pkg/ingress/controller/annotations - [X] [#503](https://github.com/kubernetes/ingress/pull/503) Add example for nginx in aws -- [X] [#502](https://github.com/kubernetes/ingress/pull/502) Add information about SSL Passthrough annotation +- [X] [#502](https://github.com/kubernetes/ingress/pull/502) Add information about SSL Passthrough annotation - [X] [#500](https://github.com/kubernetes/ingress/pull/500) Improve TLS secret configuration - [X] [#498](https://github.com/kubernetes/ingress/pull/498) Proper enqueue a secret on the secret queue - [X] [#493](https://github.com/kubernetes/ingress/pull/493) Update nginx and vts module @@ -179,7 +460,7 @@ Changelog - [X] [#485](https://github.com/kubernetes/ingress/pull/485) Fix typo nginx configMap vts metrics customization - [X] [#481](https://github.com/kubernetes/ingress/pull/481) Remove unnecessary quote in nginx log format - [X] [#471](https://github.com/kubernetes/ingress/pull/471) prometheus scrape annotations -- [X] [#460](https://github.com/kubernetes/ingress/pull/460) add example of 'run multiple haproxy ingress controllers as a deployment' +- [X] [#460](https://github.com/kubernetes/ingress/pull/460) add example of 'run multiple haproxy ingress controllers as a deployment' - [X] [#459](https://github.com/kubernetes/ingress/pull/459) Add information about SSL certificates in the default log level - [X] [#456](https://github.com/kubernetes/ingress/pull/456) Avoid upstreams with multiple servers with the same port - [X] [#454](https://github.com/kubernetes/ingress/pull/454) Pass request port to real server @@ -211,55 +492,55 @@ Changelog *Changes:* -- [X] [#433](https://github.com/kubernetes/ingress/pull/433) close over the ingress variable or the last assignment will be used -- [X] [#424](https://github.com/kubernetes/ingress/pull/424) Manually sync secrets from certificate authentication annotations -- [X] [#423](https://github.com/kubernetes/ingress/pull/423) Scrap json metrics from nginx vts module when enabled -- [X] [#418](https://github.com/kubernetes/ingress/pull/418) Only update Ingress status for the configured class -- [X] [#415](https://github.com/kubernetes/ingress/pull/415) Improve external authentication docs -- [X] [#410](https://github.com/kubernetes/ingress/pull/410) Add support for "signin url" -- [X] [#409](https://github.com/kubernetes/ingress/pull/409) Allow custom http2 header sizes -- [X] [#408](https://github.com/kubernetes/ingress/pull/408) Review docs -- [X] [#406](https://github.com/kubernetes/ingress/pull/406) Add debug info and fix spelling -- [X] [#402](https://github.com/kubernetes/ingress/pull/402) allow specifying custom dh param +- [X] [#433](https://github.com/kubernetes/ingress/pull/433) close over the ingress variable or the last assignment will be used +- [X] [#424](https://github.com/kubernetes/ingress/pull/424) Manually sync secrets from certificate authentication annotations +- [X] [#423](https://github.com/kubernetes/ingress/pull/423) Scrap json metrics from nginx vts module when enabled +- [X] [#418](https://github.com/kubernetes/ingress/pull/418) Only update Ingress status for the configured class +- [X] [#415](https://github.com/kubernetes/ingress/pull/415) Improve external authentication docs +- [X] [#410](https://github.com/kubernetes/ingress/pull/410) Add support for "signin url" +- 
[X] [#409](https://github.com/kubernetes/ingress/pull/409) Allow custom http2 header sizes +- [X] [#408](https://github.com/kubernetes/ingress/pull/408) Review docs +- [X] [#406](https://github.com/kubernetes/ingress/pull/406) Add debug info and fix spelling +- [X] [#402](https://github.com/kubernetes/ingress/pull/402) allow specifying custom dh param - [X] [#397](https://github.com/kubernetes/ingress/pull/397) Fix external auth -- [X] [#394](https://github.com/kubernetes/ingress/pull/394) Update README.md +- [X] [#394](https://github.com/kubernetes/ingress/pull/394) Update README.md - [X] [#392](https://github.com/kubernetes/ingress/pull/392) Fix http2 header size -- [X] [#391](https://github.com/kubernetes/ingress/pull/391) remove tmp nginx-diff files -- [X] [#390](https://github.com/kubernetes/ingress/pull/390) Fix RateLimit comment -- [X] [#385](https://github.com/kubernetes/ingress/pull/385) add Copyright -- [X] [#382](https://github.com/kubernetes/ingress/pull/382) Ingress Fake Certificate generation -- [X] [#380](https://github.com/kubernetes/ingress/pull/380) Fix custom log format -- [X] [#373](https://github.com/kubernetes/ingress/pull/373) Cleanup -- [X] [#371](https://github.com/kubernetes/ingress/pull/371) add configuration to disable listening on ipv6 -- [X] [#370](https://github.com/kubernetes/ingress/pull/270) Add documentation for ingress.kubernetes.io/force-ssl-redirect -- [X] [#369](https://github.com/kubernetes/ingress/pull/369) Minor text fix for "ApiServer" +- [X] [#391](https://github.com/kubernetes/ingress/pull/391) remove tmp nginx-diff files +- [X] [#390](https://github.com/kubernetes/ingress/pull/390) Fix RateLimit comment +- [X] [#385](https://github.com/kubernetes/ingress/pull/385) add Copyright +- [X] [#382](https://github.com/kubernetes/ingress/pull/382) Ingress Fake Certificate generation +- [X] [#380](https://github.com/kubernetes/ingress/pull/380) Fix custom log format +- [X] [#373](https://github.com/kubernetes/ingress/pull/373) Cleanup +- [X] [#371](https://github.com/kubernetes/ingress/pull/371) add configuration to disable listening on ipv6 +- [X] [#370](https://github.com/kubernetes/ingress/pull/370) Add documentation for ingress.kubernetes.io/force-ssl-redirect +- [X] [#369](https://github.com/kubernetes/ingress/pull/369) Minor text fix for "ApiServer" - [X] [#367](https://github.com/kubernetes/ingress/pull/367) BuildLogFormatUpstream was always using the default log-format -- [X] [#366](https://github.com/kubernetes/ingress/pull/366) add_judgment -- [X] [#365](https://github.com/kubernetes/ingress/pull/365) add ForceSSLRedirect ingress annotation -- [X] [#364](https://github.com/kubernetes/ingress/pull/364) Fix error caused by increasing proxy_buffer_size (#363) -- [X] [#362](https://github.com/kubernetes/ingress/pull/362) Fix ingress class -- [X] [#360](https://github.com/kubernetes/ingress/pull/360) add example of 'run multiple nginx ingress controllers as a deployment' -- [X] [#358](https://github.com/kubernetes/ingress/pull/358) Checks if the TLS secret contains a valid keypair structure -- [X] [#356](https://github.com/kubernetes/ingress/pull/356) Disable listen only on ipv6 and fix proxy_protocol -- [X] [#354](https://github.com/kubernetes/ingress/pull/354) add judgment -- [X] [#352](https://github.com/kubernetes/ingress/pull/352) Add ability to customize upstream and stream log format -- [X] [#351](https://github.com/kubernetes/ingress/pull/351) Enable custom election id for status sync.
-- [X] [#347](https://github.com/kubernetes/ingress/pull/347) Fix client source IP address +- [X] [#366](https://github.com/kubernetes/ingress/pull/366) add_judgment +- [X] [#365](https://github.com/kubernetes/ingress/pull/365) add ForceSSLRedirect ingress annotation +- [X] [#364](https://github.com/kubernetes/ingress/pull/364) Fix error caused by increasing proxy_buffer_size (#363) +- [X] [#362](https://github.com/kubernetes/ingress/pull/362) Fix ingress class +- [X] [#360](https://github.com/kubernetes/ingress/pull/360) add example of 'run multiple nginx ingress controllers as a deployment' +- [X] [#358](https://github.com/kubernetes/ingress/pull/358) Checks if the TLS secret contains a valid keypair structure +- [X] [#356](https://github.com/kubernetes/ingress/pull/356) Disable listen only on ipv6 and fix proxy_protocol +- [X] [#354](https://github.com/kubernetes/ingress/pull/354) add judgment +- [X] [#352](https://github.com/kubernetes/ingress/pull/352) Add ability to customize upstream and stream log format +- [X] [#351](https://github.com/kubernetes/ingress/pull/351) Enable custom election id for status sync. +- [X] [#347](https://github.com/kubernetes/ingress/pull/347) Fix client source IP address - [X] [#345](https://github.com/kubernetes/ingress/pull/345) Fix lint error -- [X] [#344](https://github.com/kubernetes/ingress/pull/344) Refactoring of TCP and UDP services -- [X] [#343](https://github.com/kubernetes/ingress/pull/343) Fix node lister when --watch-namespace is used -- [X] [#341](https://github.com/kubernetes/ingress/pull/341) Do not run coverage check in the default target. -- [X] [#340](https://github.com/kubernetes/ingress/pull/340) Add support for specify proxy cookie path/domain -- [X] [#337](https://github.com/kubernetes/ingress/pull/337) Fix for formatting error introduced in #304 -- [X] [#335](https://github.com/kubernetes/ingress/pull/335) Fix for vet complaints: -- [X] [#332](https://github.com/kubernetes/ingress/pull/332) Add annotation to customize nginx configuration -- [X] [#331](https://github.com/kubernetes/ingress/pull/331) Correct spelling mistake -- [X] [#328](https://github.com/kubernetes/ingress/pull/328) fix misspell "affinity" in main.go -- [X] [#326](https://github.com/kubernetes/ingress/pull/326) add nginx daemonset example -- [X] [#311](https://github.com/kubernetes/ingress/pull/311) Sort stream service ports to avoid extra reloads +- [X] [#344](https://github.com/kubernetes/ingress/pull/344) Refactoring of TCP and UDP services +- [X] [#343](https://github.com/kubernetes/ingress/pull/343) Fix node lister when --watch-namespace is used +- [X] [#341](https://github.com/kubernetes/ingress/pull/341) Do not run coverage check in the default target. 
+- [X] [#340](https://github.com/kubernetes/ingress/pull/340) Add support for specify proxy cookie path/domain +- [X] [#337](https://github.com/kubernetes/ingress/pull/337) Fix for formatting error introduced in #304 +- [X] [#335](https://github.com/kubernetes/ingress/pull/335) Fix for vet complaints: +- [X] [#332](https://github.com/kubernetes/ingress/pull/332) Add annotation to customize nginx configuration +- [X] [#331](https://github.com/kubernetes/ingress/pull/331) Correct spelling mistake +- [X] [#328](https://github.com/kubernetes/ingress/pull/328) fix misspell "affinity" in main.go +- [X] [#326](https://github.com/kubernetes/ingress/pull/326) add nginx daemonset example +- [X] [#311](https://github.com/kubernetes/ingress/pull/311) Sort stream service ports to avoid extra reloads - [X] [#307](https://github.com/kubernetes/ingress/pull/307) Add docs for body-size annotation -- [X] [#306](https://github.com/kubernetes/ingress/pull/306) modify nginx readme -- [X] [#304](https://github.com/kubernetes/ingress/pull/304) change 'buildSSPassthrouthUpstreams' to 'buildSSLPassthroughUpstreams' +- [X] [#306](https://github.com/kubernetes/ingress/pull/306) modify nginx readme +- [X] [#304](https://github.com/kubernetes/ingress/pull/304) change 'buildSSPassthrouthUpstreams' to 'buildSSLPassthroughUpstreams' ### 0.9-beta.2 @@ -300,9 +581,9 @@ Changelog - [X] [#227](https://github.com/kubernetes/ingress/pull/227) proxy_protocol on ssl_passthrough listener - [X] [#223](https://github.com/kubernetes/ingress/pull/223) Fix panic if a tempfile cannot be created - [X] [#220](https://github.com/kubernetes/ingress/pull/220) Fixes for minikube usage instructions. -- [X] [#219](https://github.com/kubernetes/ingress/pull/219) Fix typo, add a couple of links. +- [X] [#219](https://github.com/kubernetes/ingress/pull/219) Fix typo, add a couple of links. - [X] [#218](https://github.com/kubernetes/ingress/pull/218) Improve links from CONTRIBUTING. -- [X] [#217](https://github.com/kubernetes/ingress/pull/217) Fix an e2e link. +- [X] [#217](https://github.com/kubernetes/ingress/pull/217) Fix an e2e link. 
- [X] [#212](https://github.com/kubernetes/ingress/pull/212) Simplify code to obtain TCP or UDP services - [X] [#208](https://github.com/kubernetes/ingress/pull/208) Fix nil HTTP field - [X] [#198](https://github.com/kubernetes/ingress/pull/198) Add an example for static-ip and deployment diff --git a/controllers/nginx/Makefile b/controllers/nginx/Makefile index b1ff792ef..3293c1be9 100644 --- a/controllers/nginx/Makefile +++ b/controllers/nginx/Makefile @@ -3,10 +3,16 @@ all: push BUILDTAGS= # Use the 0.0 tag for testing, it shouldn't clobber any release builds -RELEASE?=0.9.0-beta.8 -PREFIX?=gcr.io/google_containers/nginx-ingress-controller +TAG?=0.9.0-beta.13 +REGISTRY?=gcr.io/google_containers GOOS?=linux DOCKER?=gcloud docker -- +SED_I?=sed -i +GOHOSTOS ?= $(shell go env GOHOSTOS) + +ifeq ($(GOHOSTOS),darwin) + SED_I=sed -i '' +endif REPO_INFO=$(shell git config --get remote.origin.url) @@ -16,16 +22,93 @@ endif PKG=k8s.io/ingress/controllers/nginx +ARCH ?= $(shell go env GOARCH) +GOARCH = ${ARCH} +DUMB_ARCH = ${ARCH} + +ALL_ARCH = amd64 arm arm64 ppc64le + +QEMUVERSION=v2.9.1 + +IMGNAME = nginx-ingress-controller +IMAGE = $(REGISTRY)/$(IMGNAME) +MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) + +# Set default base image dynamically for each arch +BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.24 + +ifeq ($(ARCH),arm) + QEMUARCH=arm + GOARCH=arm + DUMB_ARCH=armhf +endif +ifeq ($(ARCH),arm64) + QEMUARCH=aarch64 +endif +ifeq ($(ARCH),ppc64le) + QEMUARCH=ppc64le + GOARCH=ppc64le + DUMB_ARCH=ppc64el +endif +#ifeq ($(ARCH),s390x) +# QEMUARCH=s390x +#endif + +TEMP_DIR := $(shell mktemp -d) + +DOCKERFILE := $(TEMP_DIR)/rootfs/Dockerfile + +all: all-container + +sub-container-%: + $(MAKE) ARCH=$* build container + +sub-push-%: + $(MAKE) ARCH=$* push + +all-container: $(addprefix sub-container-,$(ALL_ARCH)) + +all-push: $(addprefix sub-push-,$(ALL_ARCH)) + +container: .container-$(ARCH) +.container-$(ARCH): + cp -r ./* $(TEMP_DIR) + $(SED_I) 's|BASEIMAGE|$(BASEIMAGE)|g' $(DOCKERFILE) + $(SED_I) "s|QEMUARCH|$(QEMUARCH)|g" $(DOCKERFILE) + $(SED_I) "s|DUMB_ARCH|$(DUMB_ARCH)|g" $(DOCKERFILE) + +ifeq ($(ARCH),amd64) + # When building "normally" for amd64, remove the whole line, it has no part in the amd64 image + $(SED_I) "/CROSS_BUILD_/d" $(DOCKERFILE) +else + # When cross-building, only the placeholder "CROSS_BUILD_" should be removed + # Register /usr/bin/qemu-ARCH-static as the handler for ARM binaries in the kernel + $(DOCKER) run --rm --privileged multiarch/qemu-user-static:register --reset + curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)/rootfs + $(SED_I) "s/CROSS_BUILD_//g" $(DOCKERFILE) +endif + + $(DOCKER) build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)/rootfs + +ifeq ($(ARCH), amd64) + # This is for to maintain the backward compatibility + $(DOCKER) tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) +endif + +push: .push-$(ARCH) +.push-$(ARCH): + $(DOCKER) push $(MULTI_ARCH_IMG):$(TAG) +ifeq ($(ARCH), amd64) + $(DOCKER) push $(IMAGE):$(TAG) +endif + +clean: + $(DOCKER) rmi -f $(MULTI_ARCH_IMG):$(TAG) || true + build: clean - CGO_ENABLED=0 GOOS=${GOOS} go build -a -installsuffix cgo \ - -ldflags "-s -w -X ${PKG}/pkg/version.RELEASE=${RELEASE} -X ${PKG}/pkg/version.COMMIT=${COMMIT} -X ${PKG}/pkg/version.REPO=${REPO_INFO}" \ - -o rootfs/nginx-ingress-controller ${PKG}/pkg/cmd/controller - -container: build - $(DOCKER) build --pull -t $(PREFIX):$(RELEASE) rootfs - -push: container - $(DOCKER) push 
$(PREFIX):$(RELEASE) + CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -a -installsuffix cgo \ + -ldflags "-s -w -X ${PKG}/pkg/version.RELEASE=${TAG} -X ${PKG}/pkg/version.COMMIT=${COMMIT} -X ${PKG}/pkg/version.REPO=${REPO_INFO}" \ + -o ${TEMP_DIR}/rootfs/nginx-ingress-controller ${PKG}/pkg/cmd/controller fmt: @echo "+ $@" @@ -49,5 +132,5 @@ vet: @echo "+ $@" @go vet $(shell go list ${PKG}/... | grep -v vendor) -clean: - rm -f rootfs/nginx-ingress-controller +release: all-container all-push + echo "done" diff --git a/controllers/nginx/README.md b/controllers/nginx/README.md index 1ecbe5556..8eb52a696 100644 --- a/controllers/nginx/README.md +++ b/controllers/nginx/README.md @@ -5,6 +5,7 @@ This is an nginx Ingress controller that uses [ConfigMap](https://github.com/kub ## Contents * [Conventions](#conventions) * [Requirements](#requirements) +* [Command line arguments](#command-line-arguments) * [Dry running](#try-running-the-ingress-controller) * [Deployment](#deployment) * [HTTP](#http) @@ -41,6 +42,61 @@ Anytime we reference a tls secret, we mean (x509, pem encoded, RSA 2048, etc). Y - Default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) +## Command line arguments +``` +Usage of : + --alsologtostderr log to standard error as well as files + --apiserver-host string The address of the Kubernetes Apiserver to connect to in the format of protocol://address:port, e.g., http://localhost:8080. If not specified, the assumption is that the binary runs inside a Kubernetes cluster and local discovery is attempted. + --configmap string Name of the ConfigMap that contains the custom configuration to use + --default-backend-service string Service used to serve a 404 page for the default backend. Takes the form + namespace/name. The controller uses the first node port of this Service for + the default backend. + --default-server-port int Default port to use for exposing the default server (catch all) (default 8181) + --default-ssl-certificate string Name of the secret + that contains a SSL certificate to be used as default for a HTTPS catch-all server + --disable-node-list Disable querying nodes. If --force-namespace-isolation is true, this should also be set. + --election-id string Election id to use for status update. (default "ingress-controller-leader") + --enable-ssl-passthrough Enable SSL passthrough feature. Default is disabled + --force-namespace-isolation Force namespace isolation. This flag is required to avoid the reference of secrets or + configmaps located in a different namespace than the specified in the flag --watch-namespace. + --health-check-path string Defines + the URL to be used as health check inside in the default server in NGINX. (default "/healthz") + --healthz-port int port for healthz endpoint. (default 10254) + --http-port int Indicates the port to use for HTTP traffic (default 80) + --https-port int Indicates the port to use for HTTPS traffic (default 443) + --ingress-class string Name of the ingress class to route through this controller. + --kubeconfig string Path to kubeconfig file with authorization and master location information. + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --logtostderr log to standard error instead of files + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --publish-service string Service fronting the ingress controllers. 
Takes the form + namespace/name. The controller will set the endpoint records on the + ingress objects to reflect those on the service. + --sort-backends Defines if backends and it's endpoints should be sorted + --ssl-passtrough-proxy-port int Default port to use internally for SSL when SSL Passthgough is enabled (default 442) + --status-port int Indicates the TCP port to use for exposing the nginx status page (default 18080) + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + --sync-period duration Relist and confirm cloud resources this often. Default is 10 minutes (default 10m0s) + --tcp-services-configmap string Name of the ConfigMap that contains the definition of the TCP services to expose. + The key in the map indicates the external port to be used. The value is the name of the + service with the format namespace/serviceName and the port of the service could be a + number of the name of the port. + The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend + --udp-services-configmap string Name of the ConfigMap that contains the definition of the UDP services to expose. + The key in the map indicates the external port to be used. The value is the name of the + service with the format namespace/serviceName and the port of the service could be a + number of the name of the port. + --update-status Indicates if the + ingress controller should update the Ingress status IP/hostname. Default is true (default true) + --update-status-on-shutdown Indicates if the + ingress controller should update the Ingress status IP/hostname when the controller + is being stopped. Default is true (default true) + -v, --v Level log level for V logs + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --watch-namespace string Namespace to watch for Ingress. Default is to watch all namespaces +``` + ## Try running the Ingress controller Before deploying the controller to production you might want to run it outside the cluster and observe it. @@ -53,23 +109,20 @@ $ ./rootfs/nginx-ingress-controller --running-in-cluster=false --default-backend ## Deployment -First create a default backend: +First create a default backend and it's corresponding service: ``` -$ kubectl create -f examples/deployment/nginx/default-backend.yaml -$ kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend +$ kubectl create -f examples/default-backend.yaml ``` +Follow the [example-deployment](../../examples/deployment/nginx/README.md) steps to deploy nginx-ingress-controller in Kubernetes cluster (you may prefer other type of workloads, like Daemonset, in production environment). Loadbalancers are created via a ReplicationController or Daemonset: -``` -$ kubectl create -f examples/default/rc-default.yaml -``` ## HTTP First we need to deploy some application to publish. 
To keep this simple we will use the [echoheaders app](https://github.com/kubernetes/contrib/blob/master/ingress/echoheaders/echo-app.yaml) that just returns information about the http request as output
```
-kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.5 --replicas=1 --port=8080
+kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.8 --replicas=1 --port=8080
```

Now we expose the same application in two different services (so we can create different Ingress rules)
@@ -138,9 +191,9 @@ spec:
serviceName: s1
servicePort: 80
```
-Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/examples/sni/nginx/test.sh) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different than the name of the certificate.
+Please follow [PREREQUISITES](../../examples/PREREQUISITES.md) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different than the name of the certificate.

-Check the [example](examples/tls/README.md)
+Check the [example](../../examples/tls-termination/nginx)

### Default SSL Certificate

@@ -264,8 +317,8 @@ To disable this behavior use `hsts=false` in the NGINX config map.

### Automated Certificate Management with Kube-Lego

-[Kube-Lego] automatically requests missing certificates or expired from
-[Let's Encrypt] by monitoring ingress resources and its referenced secrets. To
+[Kube-Lego] automatically requests missing or expired certificates from
+[Let's Encrypt] by monitoring ingress resources and their referenced secrets. To
enable this for an ingress resource you have to add an annotation:

```
@@ -281,8 +334,8 @@ version to fully support Kube-Lego is nginx Ingress controller 0.8.

## Exposing TCP services

-Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>`
-It is possible to use a number or the name of the port.
+Ingress does not support TCP services (yet). For this reason this Ingress controller uses the flag `--tcp-services-configmap` to point to an existing config map where the key is the external port to use and the value is `<namespace/service name>:<service port>:[PROXY]`
+It is possible to use a number or the name of the port. The last field is optional. By adding `PROXY` in the last field we can enable Proxy Protocol in a TCP service.

The next example shows how to expose the service `example-go` running in the namespace `default` in the port `8080` using the port `9000`
```
@@ -315,7 +368,7 @@ data:
```

-Please check the [udp services](examples/udp/README.md) example
+Please check the [udp services](../../examples/udp/nginx/README.md) example

## Proxy Protocol

@@ -338,7 +391,7 @@ Using this two headers is possible to use a custom backend service like [this on

The ngx_http_stub_status_module module provides access to basic status information. This is the default module active in the url `/nginx_status`.
This controller provides an alternative to this module using [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) third party module.
-To use this module just provide a config map with the key `enable-vts-status=true`. The URL is exposed in the port 8080.
+To use this module just provide a config map with the key `enable-vts-status=true`. The URL is exposed on port 18080.
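For illustration only (a sketch, not part of this change), a minimal ConfigMap that turns the module on could look like the following; the name and namespace are assumptions and must match whatever is passed to the controller's `--configmap` flag:

```
apiVersion: v1
kind: ConfigMap
metadata:
  # hypothetical name/namespace; must match the --configmap flag
  name: nginx-load-balancer-conf
  namespace: kube-system
data:
  # ConfigMap values are always strings
  enable-vts-status: "true"
```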
Please check the example `example/rc-default.yaml`

![nginx-module-vts screenshot](https://cloud.githubusercontent.com/assets/3648408/10876811/77a67b70-8183-11e5-9924-6a6d0c5dc73a.png "screenshot with filter")

@@ -399,7 +452,7 @@ Description:

### Local cluster

-Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) is possible to start a local kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/running-locally.md) for more details.
+Using [`hack/local-up-cluster.sh`](https://github.com/kubernetes/kubernetes/blob/master/hack/local-up-cluster.sh) it is possible to start a local kubernetes cluster consisting of a master and a single node. Please read [running-locally.md](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md) for more details.

Use of `hostNetwork: true` in the ingress controller is required to fall back to localhost:8080 for the apiserver if every other client creation check fails (eg: service account not present, kubeconfig doesn't exist, no master env vars...)

diff --git a/controllers/nginx/configuration.md b/controllers/nginx/configuration.md
index 48be84dae..07c2285b0 100644
--- a/controllers/nginx/configuration.md
+++ b/controllers/nginx/configuration.md
@@ -1,4 +1,5 @@
## Contents
+
* [Customizing NGINX](#customizing-nginx)
* [Custom NGINX configuration](#custom-nginx-configuration)
* [Custom NGINX template](#custom-nginx-template)
@@ -7,6 +8,7 @@
* [Authentication](#authentication)
* [Rewrite](#rewrite)
* [Rate limiting](#rate-limiting)
+* [SSL Passthrough](#ssl-passthrough)
* [Secure backends](#secure-backends)
* [Server-side HTTPS enforcement through redirect](#server-side-https-enforcement-through-redirect)
* [Whitelist source range](#whitelist-source-range)
@@ -17,7 +19,6 @@
* [Retries in non-idempotent methods](#retries-in-non-idempotent-methods)
* [Custom max body size](#custom-max-body-size)

-
### Customizing NGINX

@@ -26,11 +27,9 @@ There are 3 ways to customize NGINX:
2. [annotations](#annotations): use this if you want a specific configuration for the site defined in the Ingress rule.
3. custom template: when more specific settings are required, like [open_file_cache](http://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache), custom [log_format](http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format), adjust [listen](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen) options as `rcvbuf`, or when it is not possible to change an option through the ConfigMap.
-
#### Custom NGINX configuration

It is possible to customize the defaults in NGINX using a ConfigMap.
-
Please check the [custom configuration](../../examples/customization/custom-configuration/nginx/README.md) example.
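As a sketch of the ConfigMap route (the resource name here is hypothetical), overriding two of the parameters documented later in this file could look like:

```
apiVersion: v1
kind: ConfigMap
metadata:
  # hypothetical name; reference it via --configmap=<namespace>/custom-nginx-configuration
  name: custom-nginx-configuration
data:
  # both keys are documented under "Allowed parameters in configuration ConfigMap"
  proxy-connect-timeout: "10"
  server-tokens: "false"
```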
#### Annotations @@ -48,24 +47,29 @@ The following annotations are supported: |[ingress.kubernetes.io/auth-url](#external-authentication)|string| |[ingress.kubernetes.io/auth-tls-secret](#certificate-authentication)|string| |[ingress.kubernetes.io/auth-tls-verify-depth](#certificate-authentication)|number| +|[ingress.kubernetes.io/auth-tls-error-page](#certificate-authentication)|string| +|[ingress.kubernetes.io/base-url-scheme](#rewrite)|string| +|[ingress.kubernetes.io/client-body-buffer-size](#client-body-buffer-size)|string| |[ingress.kubernetes.io/configuration-snippet](#configuration-snippet)|string| +|[ingress.kubernetes.io/default-backend](#default-backend)|string| |[ingress.kubernetes.io/enable-cors](#enable-cors)|true or false| |[ingress.kubernetes.io/force-ssl-redirect](#server-side-https-enforcement-through-redirect)|true or false| +|[ingress.kubernetes.io/from-to-www-redirect](#redirect-from-to-www)|true or false| |[ingress.kubernetes.io/limit-connections](#rate-limiting)|number| |[ingress.kubernetes.io/limit-rps](#rate-limiting)|number| -|[ingress.kubernetes.io/ssl-passthrough](#ssl-passthrough)|true or false| |[ingress.kubernetes.io/proxy-body-size](#custom-max-body-size)|string| |[ingress.kubernetes.io/rewrite-target](#rewrite)|URI| |[ingress.kubernetes.io/secure-backends](#secure-backends)|true or false| +|[ingress.kubernetes.io/server-alias](#server-alias)|string| +|[ingress.kubernetes.io/service-upstream](#service-upstream)|true or false| |[ingress.kubernetes.io/session-cookie-name](#cookie-affinity)|string| |[ingress.kubernetes.io/session-cookie-hash](#cookie-affinity)|string| |[ingress.kubernetes.io/ssl-redirect](#server-side-https-enforcement-through-redirect)|true or false| +|[ingress.kubernetes.io/ssl-passthrough](#ssl-passthrough)|true or false| |[ingress.kubernetes.io/upstream-max-fails](#custom-nginx-upstream-checks)|number| |[ingress.kubernetes.io/upstream-fail-timeout](#custom-nginx-upstream-checks)|number| |[ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR| - - #### Custom NGINX template The NGINX template is located in the file `/etc/nginx/template/nginx.tmpl`. Mounting a volume is possible to use a custom version. @@ -82,11 +86,10 @@ In addition to the built-in functions provided by the Go package the following f - hasSuffix: [strings.HasSuffix](https://golang.org/pkg/strings/#HasSuffix) - toUpper: [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper) - toLower: [strings.ToLower](https://golang.org/pkg/strings/#ToLower) - - buildLocation: helper to build the NGINX Location section in each server + - buildLocation: helps to build the NGINX Location section in each server - buildProxyPass: builds the reverse proxy configuration - - buildRateLimitZones: helper to build all the required rate limit zones - - buildRateLimit: helper to build a limit zone inside a location if contains a rate limit annotation - + - buildRateLimitZones: helps to build all the required rate limit zones + - buildRateLimit: helps to build a limit zone inside a location if contains a rate limit annotation ### Custom NGINX upstream checks @@ -106,13 +109,11 @@ In NGINX, backend server pools are called "[upstreams](http://nginx.org/en/docs/ Please check the [custom upstream check](../../examples/customization/custom-upstream-check/README.md) example. - ### Authentication -Is possible to add authentication adding additional annotations in the Ingress rule. 
The source of the authentication is a secret that contains usernames and passwords inside the the key `auth`.
+It is possible to add authentication by adding additional annotations to the Ingress rule. The source of the authentication is a secret that contains usernames and passwords inside the key `auth`.

The annotations are:
-
```
ingress.kubernetes.io/auth-type: [basic|digest]
```
@@ -130,14 +131,13 @@ The secret must be created in the same namespace as the Ingress rule.
ingress.kubernetes.io/auth-realm: "realm string"
```

-Please check the [auth](/examples/auth/nginx/README.md) example.
+Please check the [auth](/examples/auth/basic/nginx/README.md) example.

### Certificate Authentication

It's possible to enable Certificate-based authentication using additional annotations in the Ingress rule.

The annotations are:
-
```
ingress.kubernetes.io/auth-tls-secret: secretName
```
@@ -150,22 +150,60 @@ ingress.kubernetes.io/auth-tls-verify-depth

The validation depth between the provided client certificate and the Certification Authority chain.

+```
+ingress.kubernetes.io/auth-tls-error-page
+```
+
+The URL/page to which the user should be redirected in case of a Certificate Authentication error.
+
Please check the [tls-auth](/examples/auth/client-certs/nginx/README.md) example.

### Configuration snippet

-Using this annotion you can add additional configuration to the NGINX location. For example:
+Using this annotation you can add additional configuration to the NGINX location. For example:

```
ingress.kubernetes.io/configuration-snippet: |
more_set_headers "Request-Id: $request_id";
```

+### Default Backend
+
+The ingress controller requires a default backend. This service handles the response when the service in the Ingress rule does not have endpoints.
+This is a global configuration for the ingress controller. In some cases it could be required to return custom content or a custom format. In this scenario we can use the annotation `ingress.kubernetes.io/default-backend: <svc name>` to specify a custom default backend.

### Enable CORS

To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule add the annotation `ingress.kubernetes.io/enable-cors: "true"`. This will add a section in the server location enabling this functionality. For more information please check https://enable-cors.org/server_nginx.html

+### Server Alias
+
+To add Server Aliases to an Ingress rule add the annotation `ingress.kubernetes.io/server-alias: "<alias>"`.
+This will create a server with the same configuration, but a different `server_name` from the provided host.
+
+*Note:* A server-alias name cannot conflict with the hostname of an existing server. If it does, the server-alias
+annotation will be ignored. If a server-alias is created and later a new server with the same hostname is created,
+the new server configuration will take precedence over the alias configuration.
+
+For more information please see http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name
+
+### Client Body Buffer Size
+
+Sets the buffer size for reading the client request body per location. In case the request body is larger than the buffer,
+the whole body or only its part is written to a temporary file. By default, the buffer size is equal to two memory pages.
+This is 8K on x86, other 32-bit platforms, and x86-64. It is usually 16K on other 64-bit platforms. This annotation is
+applied to each location provided in the ingress rule.
+
+*Note:* The annotation value must be given in a valid format, otherwise it will be ignored.
+
+For example, to set the client-body-buffer-size the following can be done:
+* `ingress.kubernetes.io/client-body-buffer-size: "1000"` # 1000 bytes
+* `ingress.kubernetes.io/client-body-buffer-size: 1k` # 1 kilobyte
+* `ingress.kubernetes.io/client-body-buffer-size: 1K` # 1 kilobyte
+* `ingress.kubernetes.io/client-body-buffer-size: 1m` # 1 megabyte
+* `ingress.kubernetes.io/client-body-buffer-size: 1M` # 1 megabyte
+
+For more information please see http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size

### External Authentication

To use an existing service that provides authentication the Ingress rule can be annotated with `ingress.kubernetes.io/auth-url` to indicate the URL where the HTTP request should be sent.

@@ -177,7 +215,6 @@ ingress.kubernetes.io/auth-url: "URL to the authentication service"
```
Please check the [external-auth](/examples/auth/external-auth/nginx/README.md) example.
-
### Rewrite

In some scenarios the exposed URL in the backend service differs from the specified path in the Ingress rule. Without a rewrite any request will return 404.
@@ -185,42 +222,72 @@ Set the annotation `ingress.kubernetes.io/rewrite-target` to the path expected b

If the application contains relative links it is possible to add an additional annotation `ingress.kubernetes.io/add-base-url` that will prepend a [`base` tag](https://developer.mozilla.org/en/docs/Web/HTML/Element/base) in the header of the returned HTML from the backend.

+If the scheme of the [`base` tag](https://developer.mozilla.org/en/docs/Web/HTML/Element/base) needs to be specified, set the annotation `ingress.kubernetes.io/base-url-scheme` to the desired scheme, such as `http` or `https`.
+
If the Application Root is exposed in a different path and needs to be redirected, set the annotation `ingress.kubernetes.io/app-root` to redirect requests for `/`.

Please check the [rewrite](/examples/rewrite/nginx/README.md) example.
-
### Rate limiting

-The annotations `ingress.kubernetes.io/limit-connections` and `ingress.kubernetes.io/limit-rps` define a limit on the connections that can be opened by a single client IP address. This can be used to mitigate [DDoS Attacks](https://www.nginx.com/blog/mitigating-ddos-attacks-with-nginx-and-nginx-plus).
+The annotations `ingress.kubernetes.io/limit-connections`, `ingress.kubernetes.io/limit-rps`, and `ingress.kubernetes.io/limit-rpm` define a limit on the connections that can be opened by a single client IP address. This can be used to mitigate [DDoS Attacks](https://www.nginx.com/blog/mitigating-ddos-attacks-with-nginx-and-nginx-plus).

`ingress.kubernetes.io/limit-connections`: number of concurrent connections allowed from a single IP address.

`ingress.kubernetes.io/limit-rps`: number of connections that may be accepted from a given IP each second.

-If you specify both annotations in a single Ingress rule, `limit-rps` takes precedence.
+`ingress.kubernetes.io/limit-rpm`: number of connections that may be accepted from a given IP each minute.
+You can specify the client IP source ranges to be excluded from rate-limiting through the `ingress.kubernetes.io/limit-whitelist` annotation. The value is a comma-separated list of CIDRs.
+
+If you specify multiple annotations in a single Ingress rule, `limit-rpm` takes precedence, followed by `limit-rps`.
+
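For illustration, a sketch of an Ingress combining these annotations (host, service, and resource names are hypothetical):

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: echo-rate-limited
  annotations:
    # at most 5 concurrent connections and 10 requests/second per client IP
    ingress.kubernetes.io/limit-connections: "5"
    ingress.kubernetes.io/limit-rps: "10"
    # exclude an internal range from the limits
    ingress.kubernetes.io/limit-whitelist: "10.0.0.0/8"
spec:
  rules:
  - host: echo.bar.com
    http:
      paths:
      - path: /
        backend:
          serviceName: echoheaders-x
          servicePort: 80
```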
+The annotations `ingress.kubernetes.io/limit-rate` and `ingress.kubernetes.io/limit-rate-after` define a limit on the rate of response transmission to a client. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per request, so if a client simultaneously opens two connections, the overall rate will be twice the specified limit.
+
+`ingress.kubernetes.io/limit-rate-after`: sets the initial amount of data after which further transmission of a response to a client will be rate limited.
+
+`ingress.kubernetes.io/limit-rate`: the rate, in bytes per second, at which the response is transmitted to a client.
+
+To configure these settings globally for all Ingress rules, the `limit-rate-after` and `limit-rate` values may be set in the NGINX ConfigMap. A value set in an Ingress annotation overrides the global setting.

### SSL Passthrough

The annotation `ingress.kubernetes.io/ssl-passthrough` allows configuring TLS termination in the pod and not in NGINX.

-This is possible thanks to the [ngx_stream_ssl_preread_module](https://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html) that enables the extraction of the server name information requested through SNI from the ClientHello message at the preread phase.
-
-**Important:** using the annotation `ingress.kubernetes.io/ssl-passthrough` invalidates all the other available annotations. This is because SSL Passthrough works in L4 (TCP).
+**Important:**
+- Using the annotation `ingress.kubernetes.io/ssl-passthrough` invalidates all the other available annotations. This is because SSL Passthrough works in L4 (TCP).
+- The use of this annotation requires the flag `--enable-ssl-passthrough` (disabled by default).

### Secure backends

By default NGINX uses `http` to reach the services. Adding the annotation `ingress.kubernetes.io/secure-backends: "true"` in the Ingress rule changes the protocol to `https`.

+### Service Upstream
+
+By default the NGINX ingress controller uses a list of all endpoints (Pod IP/port) in the NGINX upstream configuration. This annotation disables that behavior and instead uses a single upstream in NGINX, the service's Cluster IP and port. This can be desirable for things like zero-downtime deployments as it reduces the need to reload NGINX configuration when Pods come up and down. See issue [#257](https://github.com/kubernetes/ingress/issues/257).
+
+#### Known Issues
+
+If the `service-upstream` annotation is specified, the following things should be taken into consideration:
+
+* Sticky Sessions will not work as only round-robin load balancing is supported.
+* The `proxy_next_upstream` directive will not have any effect, meaning that on error the request will not be dispatched to another upstream.

### Server-side HTTPS enforcement through redirect

-By default the controller redirects (301) to `HTTPS` if TLS is enabled for that ingress. If you want to disable that behaviour globally, you can use `ssl-redirect: "false"` in the NGINX config map.
+By default the controller redirects (301) to `HTTPS` if TLS is enabled for that ingress. If you want to disable that behavior globally, you can use `ssl-redirect: "false"` in the NGINX config map.

To configure this feature for specific ingress resources, you can use the `ingress.kubernetes.io/ssl-redirect: "false"` annotation in the particular resource.
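A brief sketch (a metadata fragment only; the rest of the resource is omitted) of opting a single Ingress out of the redirect:

```
metadata:
  annotations:
    # opt this resource out of the global HTTPS redirect
    ingress.kubernetes.io/ssl-redirect: "false"
```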
-When using SSL offloading outside of cluster (e.g. AWS ELB) it may be usefull to enforce a redirect to `HTTPS` even when there is not TLS cert available. This can be achieved by using the `ingress.kubernetes.io/force-ssl-redirect: "true"` annotation in the particular resource.
+When using SSL offloading outside of the cluster (e.g. AWS ELB) it may be useful to enforce a redirect to `HTTPS` even when there is no TLS certificate available. This can be achieved by using the `ingress.kubernetes.io/force-ssl-redirect: "true"` annotation in the particular resource.
+
+### Redirect from to www
+
+In some scenarios it is required to redirect from `www.domain.com` to `domain.com`, or vice versa.
+To enable this feature use the annotation `ingress.kubernetes.io/from-to-www-redirect: "true"`.
+
+**Important:**
+If at some point a new Ingress is created with a host equal to one of the options (like `domain.com`), the annotation will be ignored.

### Whitelist source range

@@ -233,67 +300,53 @@ To configure this setting globally for all Ingress rules, the `whitelist-source-

Please check the [whitelist](/examples/affinity/cookie/nginx/README.md) example.
-
### Session Affinity

The annotation `ingress.kubernetes.io/affinity` enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server.
-
The only affinity type available for NGINX is `cookie`.
-
#### Cookie affinity
If you use the ``cookie`` type you can also specify the name of the cookie that will be used to route the requests with the annotation `ingress.kubernetes.io/session-cookie-name`. The default is to create a cookie named 'route'.

In case of NGINX the annotation `ingress.kubernetes.io/session-cookie-hash` defines which algorithm will be used to 'hash' the used upstream. Default value is `md5` and possible values are `md5`, `sha1` and `index`.
-The `index` option is not hashed, an in-memory index is used instead, it's quicker and the overhead is shorter Warning: the matching against upstream servers list is inconsistent. So, at reload, if upstreams servers has changed, index values are not guaranted to correspond to the same server as before! USE IT WITH CAUTION and only if you need to!
+The `index` option is not hashed; an in-memory index is used instead, which is quicker and has less overhead. Warning: the matching against the upstream servers list is inconsistent, so at reload, if upstream servers have changed, index values are not guaranteed to correspond to the same server as before! USE IT WITH CAUTION and only if you need to!

In NGINX this feature is implemented by the third party module [nginx-sticky-module-ng](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng). The workflow used to define which upstream server will be used is explained [here](https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng/raw/08a395c66e425540982c00482f55034e1fee67b6/docs/sticky.pdf)
-
-
### **Allowed parameters in configuration ConfigMap**

**proxy-body-size:** Sets the maximum allowed size of the client request body. See NGINX [client_max_body_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
-
-
**custom-http-errors:** Enables which HTTP codes should be passed for processing with the [error_page directive](http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page). Setting at least one code also enables [proxy_intercept_errors](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors) which are required to process error_page. Example usage: `custom-http-errors: 404,415`
-
**disable-access-log:** Disables the Access Log from the entire Ingress Controller. This is 'false' by default.

+**access-log-path:** Access log path. Goes to '/var/log/nginx/access.log' by default.
http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log
+
+**error-log-path:** Error log path. Goes to '/var/log/nginx/error.log' by default. http://nginx.org/en/docs/ngx_core_module.html#error_log

**disable-ipv6:** Disable listening on IPV6. This is 'false' by default.
-
**enable-dynamic-tls-records:** Enables dynamically sized TLS records to improve time-to-first-byte. Enabled by default. See [CloudFlare's blog](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency) for more information.
-
-**enable-underscores-in-headers:** Enables underscores in header names. This is disabled by default.
-
**enable-underscores-in-headers:** Enables underscores in header names. This is disabled by default.

**enable-vts-status:** Allows the replacement of the default status page with a third party module named [nginx-module-vts](https://github.com/vozlt/nginx-module-vts).
-
**error-log-level:** Configures the logging level of errors. Log levels above are listed in the order of increasing severity. http://nginx.org/en/docs/ngx_core_module.html#error_log
-
**gzip-types:** Sets the MIME types in addition to "text/html" to compress. The special value "\*" matches any MIME type. Responses with the "text/html" type are always compressed if `use-gzip` is enabled.
-
**hsts:** Enables or disables the header HSTS in servers running SSL.
HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header) that tells browsers that the site should only be accessed using HTTPS, instead of using HTTP. It provides protection against protocol downgrade attacks and cookie theft.
https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security
https://blog.qualys.com/securitylabs/2016/03/28/the-importance-of-a-proper-http-strict-transport-security-implementation-on-your-web-server
-
-**hsts-include-subdomains:** Enables or disables the use of HSTS in all the subdomains of the servername.
-
+**hsts-include-subdomains:** Enables or disables the use of HSTS in all the subdomains of the server-name.

**hsts-max-age:** Sets the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS.

@@ -306,7 +359,7 @@ The zero value disables keep-alive client connections.
http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout

**load-balance:** Sets the algorithm to use for load balancing. The value can either be round_robin to
-use the default round robin load balancer, least_conn to use the least connected method, or
+use the default round robin load balancer, least_conn to use the least connected method, or
ip_hash to use a hash of the server for routing. The default is least_conn.
http://nginx.org/en/docs/http/load_balancing.html.

@@ -321,59 +374,56 @@ log-format-upstream: '{ "time": "$time_iso8601", "remote_addr": "$proxy_protocol
$status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", "http_user_agent":
-    "$http_user_agent" }'
+    "$http_user_agent" }'
```

-**log-format-stream:** Sets the nginx [stream format](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format)
-.
+**log-format-stream:** Sets the nginx [stream format](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format).
- **max-worker-connections:** Sets the maximum number of simultaneous connections that can be opened by each [worker process](http://nginx.org/en/docs/ngx_core_module.html#worker_connections). - **proxy-buffer-size:** Sets the size of the buffer used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) received from the proxied server. This part usually contains a small response header. - **proxy-connect-timeout:** Sets the timeout for [establishing a connection with a proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout). It should be noted that this timeout cannot usually exceed 75 seconds. - **proxy-cookie-domain:** Sets a text that [should be changed in the domain attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain) of the “Set-Cookie” header fields of a proxied server response. - **proxy-cookie-path:** Sets a text that [should be changed in the path attribute](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path) of the “Set-Cookie” header fields of a proxied server response. - **proxy-read-timeout:** Sets the timeout in seconds for [reading a response from the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout). The timeout is set only between two successive read operations, not for the transmission of the whole response. - **proxy-send-timeout:** Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request. +**proxy-next-upstream:** Specifies in [which cases](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream) a request should be passed to the next server. + +**proxy-request-buffering:** Enables or disables [buffering of a client request body](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering). **retry-non-idempotent:** Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error in the upstream server. The previous behavior can be restored using the value "true". - **server-name-hash-bucket-size:** Sets the size of the bucket for the server names hash tables. http://nginx.org/en/docs/hash.html http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size - **server-name-hash-max-size:** Sets the maximum size of the [server names hash tables](http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_max_size) used in server names, map directive’s values, MIME types, names of request header strings, etc. http://nginx.org/en/docs/hash.html +**proxy-headers-hash-bucket-size:** Sets the size of the bucket for the proxy headers hash tables. +http://nginx.org/en/docs/hash.html +https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size + +**proxy-headers-hash-max-size:** Sets the maximum size of the proxy headers hash tables. +http://nginx.org/en/docs/hash.html +https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size **server-tokens:** Send NGINX Server header in responses and display NGINX version in error pages. Enabled by default. 
- **map-hash-bucket-size:** Sets the bucket size for the [map variables hash tables](http://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_bucket_size). The details of setting up hash tables are provided in a separate [document](http://nginx.org/en/docs/hash.html). - **ssl-buffer-size:** Sets the size of the [SSL buffer](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size) used for sending data. The default of 4k helps NGINX to improve TLS Time To First Byte (TTTFB). https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/ - **ssl-ciphers:** Sets the [ciphers](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers) list to enable. The ciphers are specified in the format understood by the OpenSSL library. The default cipher list is: @@ -384,13 +434,11 @@ The recommendation above prioritizes algorithms that provide perfect [forward se Please check the [Mozilla SSL Configuration Generator](https://mozilla.github.io/server-side-tls/ssl-config-generator/). - **ssl-dh-param:** Sets the name of the secret that contains Diffie-Hellman key to help with "Perfect Forward Secrecy". https://www.openssl.org/docs/manmaster/apps/dhparam.html https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam - **ssl-protocols:** Sets the [SSL protocols](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) to use. The default is: `TLSv1 TLSv1.1 TLSv1.2`. @@ -402,44 +450,41 @@ If you don't need to support these clients please remove `TLSv1` to improve secu Please check the result of the configuration using `https://ssllabs.com/ssltest/analyze.html` or `https://testssl.sh`. - **ssl-redirect:** Sets the global value of redirects (301) to HTTPS if the server has a TLS certificate (defined in an Ingress rule) Default is "true". - **ssl-session-cache:** Enables or disables the use of shared [SSL cache](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) among worker processes. - **ssl-session-cache-size:** Sets the size of the [SSL shared session cache](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache) between all worker processes. - **ssl-session-tickets:** Enables or disables session resumption through [TLS session tickets](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets). - **ssl-session-timeout:** Sets the time during which a client may [reuse the session](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout) parameters stored in a cache. - **upstream-max-fails:** Sets the number of unsuccessful attempts to communicate with the [server](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that should happen in the duration set by the `fail_timeout` parameter to consider the server unavailable. - **upstream-fail-timeout:** Sets the time during which the specified number of unsuccessful attempts to communicate with the [server](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) should happen to consider the server unavailable. 
- **use-gzip:** Enables or disables compression of HTTP responses using the ["gzip" module](http://nginx.org/en/docs/http/ngx_http_gzip_module.html) The default mime type list to compress is: `application/atom+xml application/javascript aplication/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component`. - **use-http2:** Enables or disables [HTTP/2](http://nginx.org/en/docs/http/ngx_http_v2_module.html) support in secure connections. - **use-proxy-protocol:** Enables or disables the [PROXY protocol](https://www.nginx.com/resources/admin-guide/proxy-protocol/) to receive client connection (real IP address) information passed through proxy servers and load balancers such as HAProxy and Amazon Elastic Load Balancer (ELB). - **whitelist-source-range:** Sets the default whitelisted IPs for each `server` block. This can be overwritten by an annotation on an Ingress rule. See [ngx_http_access_module](http://nginx.org/en/docs/http/ngx_http_access_module.html). - **worker-processes:** Sets the number of [worker processes](http://nginx.org/en/docs/ngx_core_module.html#worker_processes). The default of "auto" means number of available CPU cores. +**worker-shutdown-timeout:** Sets a timeout for Nginx to [wait for worker to gracefully shutdown](http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout). The default is "10s". + +**limit-conn-zone-variable:** Sets parameters for a shared memory zone that will keep states for various keys of [limit_conn_zone](http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone). The default of "$binary_remote_addr" variable’s size is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses. + +**proxy-set-headers:** Sets custom headers from a configmap before sending traffic to backends. See [example](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-headers/nginx) + +**add-headers:** Sets custom headers from a configmap before sending traffic to the client. See `proxy-set-headers` [example](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-headers/nginx) + +**bind-address:** Sets the addresses on which the server will accept requests instead of *. It should be noted that these addresses must exist in the runtime environment or the controller will crash loop. ### Default configuration options @@ -460,13 +505,14 @@ The following table shows the options, the default value and a description. 
|hsts-max-age|"15724800"|
|hsts-preload|"false"|
|ignore-invalid-headers|"true"|
-|keep-alive|"75"|
+|keep-alive|"75"|
|log-format-stream|[$time_local] $protocol $status $bytes_sent $bytes_received $session_time|
-|log-format-upstream|[$the_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status|
+|log-format-upstream|[$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status|
|map-hash-bucket-size|"64"|
|max-worker-connections|"16384"|
|proxy-body-size|same as body-size|
|proxy-buffer-size|"4k"|
+|proxy-request-buffering|"on"|
|proxy-connect-timeout|"5"|
|proxy-cookie-domain|"off"|
|proxy-cookie-path|"off"|
@@ -487,12 +533,14 @@ The following table shows the options, the default value and a description.
|ssl-session-timeout|10m|
|use-gzip|"true"|
|use-http2|"true"|
+|upstream-keepalive-connections|"0" (disabled)|
|variables-hash-bucket-size|64|
|variables-hash-max-size|2048|
|vts-status-zone-size|10m|
|whitelist-source-range|permit all|
|worker-processes|number of CPUs|
-
+|limit-conn-zone-variable|$binary_remote_addr|
+|bind-address||

### Websockets

@@ -501,23 +549,19 @@ Support for websockets is provided by NGINX out of the box. No special configura

The only requirement to avoid closing connections is to increase the values of `proxy-read-timeout` and `proxy-send-timeout`. The default value of these settings is `60 seconds`. A more adequate value to support websockets is higher than one hour (`3600`).
-
### Optimizing TLS Time To First Byte (TTTFB)

NGINX provides the configuration option [ssl_buffer_size](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size) to allow the optimization of the TLS record size. This improves the [Time To First Byte](https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/) (TTTFB). The default value in the Ingress controller is `4k` (NGINX default is `16k`).
-
### Retries in non-idempotent methods

Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error. The previous behavior can be restored using `retry-non-idempotent=true` in the configuration ConfigMap.
-
### Custom max body size

For NGINX, a 413 error will be returned to the client when the size in a request exceeds the maximum allowed size of the client request body. This size can be configured by the parameter [`client_max_body_size`](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).

To configure this setting globally for all Ingress rules, the `proxy-body-size` value may be set in the NGINX ConfigMap.
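For example (a sketch; the ConfigMap name is hypothetical and must match the controller's `--configmap` flag), raising the global limit to 8 megabytes:

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration
data:
  proxy-body-size: "8m"
```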
- To use custom values in an Ingress rule define these annotation: ``` diff --git a/controllers/nginx/examples/default-backend.yaml b/controllers/nginx/examples/default-backend.yaml new file mode 100644 index 000000000..3c40989a3 --- /dev/null +++ b/controllers/nginx/examples/default-backend.yaml @@ -0,0 +1,51 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: default-http-backend + labels: + k8s-app: default-http-backend + namespace: kube-system +spec: + replicas: 1 + template: + metadata: + labels: + k8s-app: default-http-backend + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: default-http-backend + # Any image is permissable as long as: + # 1. It serves a 404 page at / + # 2. It serves 200 on a /healthz endpoint + image: gcr.io/google_containers/defaultbackend:1.0 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: default-http-backend + namespace: kube-system + labels: + k8s-app: default-http-backend +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + k8s-app: default-http-backend diff --git a/controllers/nginx/examples/echo-header.yaml b/controllers/nginx/examples/echo-header.yaml new file mode 100644 index 000000000..a0fa1a4bf --- /dev/null +++ b/controllers/nginx/examples/echo-header.yaml @@ -0,0 +1,46 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: echoheaders +spec: + replicas: 1 + template: + metadata: + labels: + app: echoheaders + spec: + containers: + - name: echoheaders + image: gcr.io/google_containers/echoserver:1.8 + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: echoheaders-x + labels: + app: echoheaders-x +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: echoheaders +--- +apiVersion: v1 +kind: Service +metadata: + name: echoheaders-y + labels: + app: echoheaders-y +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: echoheaders \ No newline at end of file diff --git a/controllers/nginx/examples/ingress.yaml b/controllers/nginx/examples/ingress.yaml new file mode 100644 index 000000000..8a78b85b9 --- /dev/null +++ b/controllers/nginx/examples/ingress.yaml @@ -0,0 +1,26 @@ +# This is the Ingress resource that creates a HTTP Loadbalancer configured +# according to the Ingress rules. 
+apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: echomap +spec: + rules: + - host: foo.bar.com + http: + paths: + - path: /foo + backend: + serviceName: echoheaders-x + servicePort: 80 + - host: bar.baz.com + http: + paths: + - path: /bar + backend: + serviceName: echoheaders-y + servicePort: 80 + - path: /foo + backend: + serviceName: echoheaders-x + servicePort: 80 \ No newline at end of file diff --git a/controllers/nginx/pkg/cmd/controller/main.go b/controllers/nginx/pkg/cmd/controller/main.go index 8086d413f..d9cdf95c1 100644 --- a/controllers/nginx/pkg/cmd/controller/main.go +++ b/controllers/nginx/pkg/cmd/controller/main.go @@ -23,17 +23,15 @@ import ( "time" "github.com/golang/glog" - "k8s.io/ingress/core/pkg/ingress/controller" ) func main() { // start a new nginx controller ngx := newNGINXController() - // create a custom Ingress controller using NGINX as backend - ic := controller.NewIngressController(ngx) - go handleSigterm(ic) + + go handleSigterm(ngx) // start the controller - ic.Start() + ngx.Start() // wait glog.Infof("shutting down Ingress controller...") for { @@ -42,14 +40,14 @@ func main() { } } -func handleSigterm(ic *controller.GenericController) { +func handleSigterm(ngx *NGINXController) { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGTERM) <-signalChan glog.Infof("Received SIGTERM, shutting down") exitCode := 0 - if err := ic.Stop(); err != nil { + if err := ngx.Stop(); err != nil { glog.Infof("Error during shutdown %v", err) exitCode = 1 } diff --git a/controllers/nginx/pkg/cmd/controller/metrics.go b/controllers/nginx/pkg/cmd/controller/metrics.go index b803e6756..ee33c6c5e 100644 --- a/controllers/nginx/pkg/cmd/controller/metrics.go +++ b/controllers/nginx/pkg/cmd/controller/metrics.go @@ -24,7 +24,7 @@ import ( ) const ( - ngxStatusPath = "/internal_nginx_status" + ngxStatusPath = "/nginx_status" ngxVtsPath = "/nginx_status/format/json" ) @@ -45,6 +45,8 @@ type statsCollector struct { namespace string watchClass string + + port int } func (s *statsCollector) stop(sm statusModule) { @@ -52,29 +54,28 @@ func (s *statsCollector) stop(sm statusModule) { case defaultStatusModule: s.basic.Stop() prometheus.Unregister(s.basic) - break case vtsStatusModule: s.vts.Stop() prometheus.Unregister(s.vts) - break } } func (s *statsCollector) start(sm statusModule) { switch sm { case defaultStatusModule: - s.basic = collector.NewNginxStatus(s.namespace, s.watchClass, ngxHealthPort, ngxStatusPath) + s.basic = collector.NewNginxStatus(s.namespace, s.watchClass, s.port, ngxStatusPath) prometheus.Register(s.basic) break case vtsStatusModule: - s.vts = collector.NewNGINXVTSCollector(s.namespace, s.watchClass, ngxHealthPort, ngxVtsPath) + s.vts = collector.NewNGINXVTSCollector(s.namespace, s.watchClass, s.port, ngxVtsPath) prometheus.Register(s.vts) break } } -func newStatsCollector(ns, class, binary string) *statsCollector { +func newStatsCollector(ns, class, binary string, port int) *statsCollector { glog.Infof("starting new nginx stats collector for Ingress controller running in namespace %v (class %v)", ns, class) + glog.Infof("collector extracting information from port %v", port) pc, err := collector.NewNamedProcess(true, collector.BinaryNameMatcher{ Name: "nginx", Binary: binary, @@ -91,5 +92,6 @@ func newStatsCollector(ns, class, binary string) *statsCollector { namespace: ns, watchClass: class, process: pc, + port: port, } } diff --git a/controllers/nginx/pkg/cmd/controller/nginx.go b/controllers/nginx/pkg/cmd/controller/nginx.go 
index 8c3a5184b..bfcdfe4ff 100644
--- a/controllers/nginx/pkg/cmd/controller/nginx.go
+++ b/controllers/nginx/pkg/cmd/controller/nginx.go
@@ -31,15 +31,19 @@ import (
	"time"

	"github.com/golang/glog"
+	"github.com/mitchellh/go-ps"
	"github.com/spf13/pflag"

	proxyproto "github.com/armon/go-proxyproto"
-	api_v1 "k8s.io/client-go/pkg/api/v1"
+	api "k8s.io/api/core/v1"
+	api_v1 "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"

	"k8s.io/ingress/controllers/nginx/pkg/config"
	ngx_template "k8s.io/ingress/controllers/nginx/pkg/template"
	"k8s.io/ingress/controllers/nginx/pkg/version"
	"k8s.io/ingress/core/pkg/ingress"
+	"k8s.io/ingress/core/pkg/ingress/controller"
	"k8s.io/ingress/core/pkg/ingress/defaults"
	"k8s.io/ingress/core/pkg/net/dns"
	"k8s.io/ingress/core/pkg/net/ssl"
@@ -48,11 +52,12 @@ import (
type statusModule string

const (
-	ngxHealthPort = 18080
	ngxHealthPath = "/healthz"

	defaultStatusModule statusModule = "default"
	vtsStatusModule     statusModule = "vts"
+
+	defUpstreamName = "upstream-default-backend"
)

var (
@@ -65,7 +70,7 @@ var (
// newNGINXController creates a new NGINX Ingress controller.
// If the environment variable NGINX_BINARY exists it will be used
// as source for nginx commands
-func newNGINXController() ingress.Controller {
+func newNGINXController() *NGINXController {
	ngx := os.Getenv("NGINX_BINARY")
	if ngx == "" {
		ngx = binary
@@ -77,51 +82,14 @@ func newNGINXController() ingress.Controller {
	}

	n := &NGINXController{
-		binary:        ngx,
-		configmap:     &api_v1.ConfigMap{},
-		isIPV6Enabled: isIPv6Enabled(),
-		resolver:      h,
-		proxy: &proxy{
-			Default: &server{
-				Hostname:      "localhost",
-				IP:            "127.0.0.1",
-				Port:          442,
-				ProxyProtocol: true,
-			},
-		},
+		binary:          ngx,
+		configmap:       &api_v1.ConfigMap{},
+		isIPV6Enabled:   isIPv6Enabled(),
+		resolver:        h,
+		ports:           &config.ListenPorts{},
+		backendDefaults: config.NewDefault().Backend,
	}

-	listener, err := net.Listen("tcp", ":443")
-	if err != nil {
-		glog.Fatalf("%v", err)
-	}
-
-	proxyList := &proxyproto.Listener{Listener: listener}
-
-	// start goroutine that accepts tcp connections in port 443
-	go func() {
-		for {
-			var conn net.Conn
-			var err error
-
-			if n.isProxyProtocolEnabled {
-				// we need to wrap the listener in order to decode
-				// proxy protocol before handling the connection
-				conn, err = proxyList.Accept()
-			} else {
-				conn, err = listener.Accept()
-			}
-
-			if err != nil {
-				glog.Warningf("unexpected error accepting tcp connection: %v", err)
-				continue
-			}
-
-			glog.V(3).Infof("remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
-			go n.proxy.Handle(conn)
-		}
-	}()
-
	var onChange func()
	onChange = func() {
		template, err := ngx_template.NewTemplate(tmplPath, onChange)
@@ -147,14 +115,13 @@ Error loading new template : %v

	n.t = ngxTpl

-	go n.Start()
-
-	return ingress.Controller(n)
+	return n
}

// NGINXController ...
type NGINXController struct {
-	t *ngx_template.Template
+	controller *controller.GenericController
+	t          *ngx_template.Template

	configmap *api_v1.ConfigMap

@@ -165,9 +132,6 @@ type NGINXController struct {

	cmdArgs []string

-	watchClass string
-	namespace  string
-
	stats *statsCollector

	statusModule statusModule
@@ -177,15 +141,35 @@ type NGINXController struct {

	// returns true if proxy protocol is enabled
	isProxyProtocolEnabled bool

+	isSSLPassthroughEnabled bool
+
+	isShuttingDown bool
+
	proxy *proxy
+
+	ports *config.ListenPorts
+
+	backendDefaults defaults.Backend
}

// Start starts a new NGINX master process running in foreground.
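Start, defined next, launches the nginx master with Setpgid so the child lives in its own process group: a SIGTERM aimed at the controller's process group no longer reaches nginx directly, leaving the controller free to stop nginx in an orderly way via Stop. A standalone sketch of the technique, assuming a Linux/POSIX target; the sleep child is a placeholder:

package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "60") // placeholder child process
	// Detach the child into its own process group so signals delivered
	// to the parent's group do not also hit the child.
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 0}
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting child: %v", err)
	}
	log.Printf("child pid %d runs in its own process group", cmd.Process.Pid)
	_ = cmd.Wait()
}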
func (n *NGINXController) Start() {
-	glog.Info("starting NGINX process...")
+	n.isShuttingDown = false
+
+	n.controller = controller.NewIngressController(n)
+	go n.controller.Start()

	done := make(chan error, 1)
	cmd := exec.Command(n.binary, "-c", cfgPath)
+
+	// put nginx in another process group to prevent it
+	// from receiving signals meant for the controller
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Setpgid: true,
+		Pgid:    0,
+	}
+
+	glog.Info("starting NGINX process...")
	n.start(cmd, done)

	// if the nginx master process dies the workers continue to process requests,
@@ -195,6 +179,11 @@ func (n *NGINXController) Start() {
	// To avoid this issue we restart nginx in case of errors.
	for {
		err := <-done
+
+		if n.isShuttingDown {
+			break
+		}
+
		if exitError, ok := err.(*exec.ExitError); ok {
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			glog.Warningf(`
@@ -214,11 +203,34 @@ NGINX master process died (%v): %v
			conn.Close()
			time.Sleep(1 * time.Second)
		}
-		// start a new nginx master process
+		// restart the nginx master process if the controller
+		// is not being stopped
		n.start(cmd, done)
	}
}

+// Stop gracefully stops the NGINX master process.
+func (n *NGINXController) Stop() error {
+	n.isShuttingDown = true
+	n.controller.Stop()
+
+	// Send stop signal to Nginx
+	glog.Info("stopping NGINX process...")
+	cmd := exec.Command(n.binary, "-c", cfgPath, "-s", "quit")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	err := cmd.Run()
+	if err != nil {
+		return err
+	}
+
+	// Wait for the Nginx process to disappear
+	waitForNginxShutdown()
+	glog.Info("NGINX process has stopped")
+
+	return nil
+}
+
func (n *NGINXController) start(cmd *exec.Cmd, done chan error) {
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
@@ -237,17 +249,16 @@ func (n *NGINXController) start(cmd *exec.Cmd, done chan error) {

// BackendDefaults returns the nginx defaults
func (n NGINXController) BackendDefaults() defaults.Backend {
-	if n.configmap == nil {
-		d := config.NewDefault()
-		return d.Backend
-	}
-
-	return ngx_template.ReadConfig(n.configmap.Data).Backend
+	return n.backendDefaults
}

// printDiff returns the difference between the running configuration
// and the new one
func (n NGINXController) printDiff(data []byte) {
+	if !glog.V(2) {
+		return
+	}
+
	in, err := os.Open(cfgPath)
	if err != nil {
		return
@@ -276,10 +287,9 @@ func (n NGINXController) printDiff(data []byte) {
			return
		}

-		if glog.V(2) {
-			glog.Infof("NGINX configuration diff\n")
-			glog.Infof("%v", string(diffOutput))
-		}
+		glog.Infof("NGINX configuration diff\n")
+		glog.Infof("%v", string(diffOutput))
+
		os.Remove(tmpfile.Name())
	}
}
@@ -294,13 +304,42 @@ func (n NGINXController) Info() *ingress.BackendInfo {
	}
}

+// DefaultEndpoint returns the default endpoint to be used as the default server that returns 404.
+func (n NGINXController) DefaultEndpoint() ingress.Endpoint {
+	return ingress.Endpoint{
+		Address: "127.0.0.1",
+		Port:    fmt.Sprintf("%v", n.ports.Default),
+		Target:  &api.ObjectReference{},
+	}
+}
+
// ConfigureFlags allows configuring more flags before the parsing of
// command line arguments
func (n *NGINXController) ConfigureFlags(flags *pflag.FlagSet) {
+	flags.BoolVar(&n.isSSLPassthroughEnabled, "enable-ssl-passthrough", false, `Enable SSL passthrough feature.
Default is disabled`)
+	flags.IntVar(&n.ports.HTTP, "http-port", 80, `Indicates the port to use for HTTP traffic`)
+	flags.IntVar(&n.ports.HTTPS, "https-port", 443, `Indicates the port to use for HTTPS traffic`)
+	flags.IntVar(&n.ports.Status, "status-port", 18080, `Indicates the TCP port to use for exposing the nginx status page`)
+	flags.IntVar(&n.ports.SSLProxy, "ssl-passtrough-proxy-port", 442, `Default port to use internally for SSL when SSL Passthrough is enabled`)
+	flags.IntVar(&n.ports.Default, "default-server-port", 8181, `Default port to use for exposing the default server (catch all)`)
}

// OverrideFlags customizes NGINX controller flags
func (n *NGINXController) OverrideFlags(flags *pflag.FlagSet) {
+	// check for port collisions
+	if !isPortAvailable(n.ports.HTTP) {
+		glog.Fatalf("Port %v is already in use. Please check the flag --http-port", n.ports.HTTP)
+	}
+	if !isPortAvailable(n.ports.HTTPS) {
+		glog.Fatalf("Port %v is already in use. Please check the flag --https-port", n.ports.HTTPS)
+	}
+	if !isPortAvailable(n.ports.Status) {
+		glog.Fatalf("Port %v is already in use. Please check the flag --status-port", n.ports.Status)
+	}
+	if !isPortAvailable(n.ports.Default) {
+		glog.Fatalf("Port %v is already in use. Please check the flag --default-server-port", n.ports.Default)
+	}
+
	ic, _ := flags.GetString("ingress-class")
	wc, _ := flags.GetString("watch-namespace")
@@ -313,7 +352,58 @@ func (n *NGINXController) OverrideFlags(flags *pflag.FlagSet) {
	}

	flags.Set("ingress-class", ic)
-	n.stats = newStatsCollector(wc, ic, n.binary)
+
+	h, _ := flags.GetInt("healthz-port")
+	n.ports.Health = h
+
+	n.stats = newStatsCollector(wc, ic, n.binary, n.ports.Status)
+
+	if n.isSSLPassthroughEnabled {
+		if !isPortAvailable(n.ports.SSLProxy) {
+			glog.Fatalf("Port %v is already in use.
Please check the flag --ssl-passtrough-proxy-port", n.ports.SSLProxy) + } + + glog.Info("starting TLS proxy for SSL passthrough") + n.proxy = &proxy{ + Default: &server{ + Hostname: "localhost", + IP: "127.0.0.1", + Port: n.ports.SSLProxy, + ProxyProtocol: true, + }, + } + + listener, err := net.Listen("tcp", fmt.Sprintf(":%v", n.ports.HTTPS)) + if err != nil { + glog.Fatalf("%v", err) + } + + proxyList := &proxyproto.Listener{Listener: listener} + + // start goroutine that accepts tcp connections in port 443 + go func() { + for { + var conn net.Conn + var err error + + if n.isProxyProtocolEnabled { + // we need to wrap the listener in order to decode + // proxy protocol before handling the connection + conn, err = proxyList.Accept() + } else { + conn, err = listener.Accept() + } + + if err != nil { + glog.Warningf("unexpected error accepting tcp connection: %v", err) + continue + } + + glog.V(3).Infof("remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) + go n.proxy.Handle(conn) + } + }() + } } // DefaultIngressClass just return the default ingress class @@ -355,20 +445,22 @@ Error: %v // SetConfig sets the configured configmap func (n *NGINXController) SetConfig(cmap *api_v1.ConfigMap) { n.configmap = cmap - n.isProxyProtocolEnabled = false - if cmap == nil { - return + + m := map[string]string{} + if cmap != nil { + m = cmap.Data } - val, ok := cmap.Data["use-proxy-protocol"] + val, ok := m["use-proxy-protocol"] if ok { b, err := strconv.ParseBool(val) if err == nil { n.isProxyProtocolEnabled = b - return } } + + n.backendDefaults = ngx_template.ReadConfig(m).Backend } // SetListers sets the configured store listers in the generic ingress controller @@ -376,7 +468,12 @@ func (n *NGINXController) SetListers(lister ingress.StoreLister) { n.storeLister = lister } -// OnUpdate is called by syncQueue in https://github.com/aledbf/ingress-controller/blob/master/pkg/ingress/controller/controller.go#L82 +// UpdateIngressStatus custom Ingress status update +func (n *NGINXController) UpdateIngressStatus(*extensions.Ingress) []api_v1.LoadBalancerIngress { + return nil +} + +// OnUpdate is called by syncQueue in https://github.com/kubernetes/ingress/blob/master/core/pkg/ingress/controller/controller.go#L426 // periodically to keep the configuration in sync. // // convert configmap to custom configuration object (different in each implementation) @@ -385,15 +482,6 @@ func (n *NGINXController) SetListers(lister ingress.StoreLister) { // returning nill implies the backend will be reloaded. // if an error is returned means requeue the update func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { - var longestName int - var serverNameBytes int - for _, srv := range ingressCfg.Servers { - if longestName < len(srv.Hostname) { - longestName = len(srv.Hostname) - } - serverNameBytes += len(srv.Hostname) - } - cfg := ngx_template.ReadConfig(n.configmap.Data) cfg.Resolver = n.resolver @@ -430,7 +518,9 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { }) } - n.proxy.ServerList = servers + if n.isSSLPassthroughEnabled { + n.proxy.ServerList = servers + } // we need to check if the status module configuration changed if cfg.EnableVtsStatus { @@ -439,14 +529,44 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { n.setupMonitor(defaultStatusModule) } - // NGINX cannot resize the has tables used to store server names. + // NGINX cannot resize the hash tables used to store server names. 
// For this reason we check that the defined size is correct
// for the FQDN defined in the ingress rules, adjusting the value
// if required.
// https://trac.nginx.org/nginx/ticket/352
// https://trac.nginx.org/nginx/ticket/631
-	nameHashBucketSize := nginxHashBucketSize(longestName)
+	var longestName int
+	var serverNameBytes int
+	redirectServers := make(map[string]string)
+	for _, srv := range ingressCfg.Servers {
+		if longestName < len(srv.Hostname) {
+			longestName = len(srv.Hostname)
+		}
+		serverNameBytes += len(srv.Hostname)
+		if srv.RedirectFromToWWW {
+			var n string
+			if strings.HasPrefix(srv.Hostname, "www.") {
+				n = strings.TrimPrefix(srv.Hostname, "www.")
+			} else {
+				n = fmt.Sprintf("www.%v", srv.Hostname)
+			}
+			glog.V(3).Infof("creating redirect from %v to %v", srv.Hostname, n)
+			if _, ok := redirectServers[n]; !ok {
+				found := false
+				for _, esrv := range ingressCfg.Servers {
+					if esrv.Hostname == n {
+						found = true
+						break
+					}
+				}
+				if !found {
+					redirectServers[n] = srv.Hostname
+				}
+			}
+		}
+	}
	if cfg.ServerNameHashBucketSize == 0 {
+		nameHashBucketSize := nginxHashBucketSize(longestName)
		glog.V(3).Infof("adjusting ServerNameHashBucketSize variable to %v", nameHashBucketSize)
		cfg.ServerNameHashBucketSize = nameHashBucketSize
	}
@@ -482,6 +602,18 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
		}
	}

+	addHeaders := map[string]string{}
+	if cfg.AddHeaders != "" {
+		cmap, exists, err := n.storeLister.ConfigMap.GetByKey(cfg.AddHeaders)
+		if err != nil {
+			glog.Warningf("unexpected error reading configmap %v: %v", cfg.AddHeaders, err)
+		}
+
+		if exists {
+			addHeaders = cmap.(*api_v1.ConfigMap).Data
+		}
+	}
+
	sslDHParam := ""
	if cfg.SSLDHParam != "" {
		secretName := cfg.SSLDHParam
@@ -508,20 +640,26 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {

	cfg.SSLDHParam = sslDHParam

-	content, err := n.t.Write(config.TemplateConfig{
-		ProxySetHeaders:     setHeaders,
-		MaxOpenFiles:        maxOpenFiles,
-		BacklogSize:         sysctlSomaxconn(),
-		Backends:            ingressCfg.Backends,
-		PassthroughBackends: ingressCfg.PassthroughBackends,
-		Servers:             ingressCfg.Servers,
-		TCPBackends:         ingressCfg.TCPEndpoints,
-		UDPBackends:         ingressCfg.UDPEndpoints,
-		HealthzURI:          ngxHealthPath,
-		CustomErrors:        len(cfg.CustomHTTPErrors) > 0,
-		Cfg:                 cfg,
-		IsIPV6Enabled:       n.isIPV6Enabled && !cfg.DisableIpv6,
-	})
+	tc := config.TemplateConfig{
+		ProxySetHeaders:         setHeaders,
+		AddHeaders:              addHeaders,
+		MaxOpenFiles:            maxOpenFiles,
+		BacklogSize:             sysctlSomaxconn(),
+		Backends:                ingressCfg.Backends,
+		PassthroughBackends:     ingressCfg.PassthroughBackends,
+		Servers:                 ingressCfg.Servers,
+		TCPBackends:             ingressCfg.TCPEndpoints,
+		UDPBackends:             ingressCfg.UDPEndpoints,
+		HealthzURI:              ngxHealthPath,
+		CustomErrors:            len(cfg.CustomHTTPErrors) > 0,
+		Cfg:                     cfg,
+		IsIPV6Enabled:           n.isIPV6Enabled && !cfg.DisableIpv6,
+		RedirectServers:         redirectServers,
+		IsSSLPassthroughEnabled: n.isSSLPassthroughEnabled,
+		ListenPorts:             n.ports,
+	}
+
+	content, err := n.t.Write(tc)

	if err != nil {
		return err
@@ -539,9 +677,9 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
		return err
	}

-	o, e := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput()
+	o, err := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput()
	if err != nil {
-		return fmt.Errorf("%v\n%v", e, string(o))
+		return fmt.Errorf("%v\n%v", err, string(o))
	}

	return nil
@@ -564,7 +702,7 @@ func (n NGINXController) Name() string {

// Check returns whether the nginx healthz endpoint is returning ok (status code 200)
func (n NGINXController) Check(_ *http.Request) error {
-	res, err := http.Get(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxHealthPath))
+	res, err := http.Get(fmt.Sprintf("http://localhost:%v%v", n.ports.Status, ngxHealthPath))
	if err != nil {
		return err
	}
@@ -593,3 +731,27 @@ func isIPv6Enabled() bool {
	cmd := exec.Command("test", "-f", "/proc/net/if_inet6")
	return cmd.Run() == nil
}
+
+// isNginxProcessPresent returns true if a process with the name 'nginx' is found
+func isNginxProcessPresent() bool {
+	processes, _ := ps.Processes()
+	for _, p := range processes {
+		if p.Executable() == "nginx" {
+			return true
+		}
+	}
+	return false
+}
+
+func waitForNginxShutdown() {
+	timer := time.NewTicker(time.Second * 1)
+	defer timer.Stop()
+	for {
+		select {
+		case <-timer.C:
+			if !isNginxProcessPresent() {
+				return
+			}
+		}
+	}
+}
diff --git a/controllers/nginx/pkg/cmd/controller/utils.go b/controllers/nginx/pkg/cmd/controller/utils.go
index 3a3b04823..05db390e0 100644
--- a/controllers/nginx/pkg/cmd/controller/utils.go
+++ b/controllers/nginx/pkg/cmd/controller/utils.go
@@ -17,7 +17,9 @@ limitations under the License.
 package main

 import (
+	"fmt"
	"io/ioutil"
+	"net"
	"os"
	"os/exec"
	"syscall"
@@ -74,3 +76,12 @@ func diff(b1, b2 []byte) ([]byte, error) {
	out, _ := exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
	return out, nil
}
+
+func isPortAvailable(p int) bool {
+	ln, err := net.Listen("tcp", fmt.Sprintf(":%v", p))
+	if err != nil {
+		return false
+	}
+	ln.Close()
+	return true
+}
diff --git a/controllers/nginx/pkg/config/config.go b/controllers/nginx/pkg/config/config.go
index 0c23d8a27..4bb53b0db 100644
--- a/controllers/nginx/pkg/config/config.go
+++ b/controllers/nginx/pkg/config/config.go
@@ -43,12 +43,9 @@ const (
	// max-age is the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS.
	hstsMaxAge = "15724800"

-	// If UseProxyProtocol is enabled defIPCIDR defines the default IP/network address of your external load balancer
-	defIPCIDR = "0.0.0.0/0"
-
	gzipTypes = "application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component"

-	logFormatUpstream = `%v - [$the_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status`
+	logFormatUpstream = `%v - [$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status`

	logFormatStream = `[$time_local] $protocol $status $bytes_sent $bytes_received $session_time`

@@ -76,18 +73,35 @@ const (

	// Default setting for load balancer algorithm
	defaultLoadBalancerAlgorithm = "least_conn"
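Stepping back to the isPortAvailable helper added to utils.go above: it is a bind probe, claiming the TCP port and releasing it immediately. A short usage sketch with an illustrative port number; note the probe is inherently racy, since another process may grab the port between the check and the real bind, so a later bind failure must still be handled:

package main

import (
	"fmt"
	"net"
)

// isPortAvailable mirrors the helper from utils.go: try to bind the
// TCP port, release it immediately, report whether the bind succeeded.
func isPortAvailable(p int) bool {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%v", p))
	if err != nil {
		return false
	}
	ln.Close()
	return true
}

func main() {
	// 8181 matches the controller's default-server port; illustrative only.
	fmt.Printf("port 8181 available: %v\n", isPortAvailable(8181))
}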
+
+	// Parameters for a shared memory zone that will keep states for various keys.
+	// http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone
+	defaultLimitConnZoneVariable = "$binary_remote_addr"
)

// Configuration represents the content of nginx.conf file
type Configuration struct {
	defaults.Backend `json:",squash"`

+	// Sets the name of the configmap that contains the headers to pass to the client
+	AddHeaders string `json:"add-headers,omitempty"`
+
	// AllowBackendServerHeader enables the return of the header Server from the backend
	// instead of the generic nginx string.
	// http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header
	// By default this is disabled
	AllowBackendServerHeader bool `json:"allow-backend-server-header"`

+	// AccessLogPath sets the path of the access logs if enabled
+	// http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log
+	// By default access logs go to /var/log/nginx/access.log
+	AccessLogPath string `json:"access-log-path,omitempty"`
+
+	// ErrorLogPath sets the path of the error logs
+	// http://nginx.org/en/docs/ngx_core_module.html#error_log
+	// By default error logs go to /var/log/nginx/error.log
+	ErrorLogPath string `json:"error-log-path,omitempty"`
+
	// EnableDynamicTLSRecords enables dynamic TLS record sizes
	// https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency
	// By default this is enabled
@@ -98,10 +112,18 @@ type Configuration struct {
	// http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size
	ClientHeaderBufferSize string `json:"client-header-buffer-size"`

+	// Defines a timeout for reading client request header, in seconds
+	// http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout
+	ClientHeaderTimeout int `json:"client-header-timeout,omitempty"`
+
	// Sets buffer size for reading client request body
	// http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size
	ClientBodyBufferSize string `json:"client-body-buffer-size,omitempty"`

+	// Defines a timeout for reading client request body, in seconds
+	// http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout
+	ClientBodyTimeout int `json:"client-body-timeout,omitempty"`
+
	// DisableAccessLog disables the Access Log globally from NGINX ingress controller
	// http://nginx.org/en/docs/http/ngx_http_log_module.html
	DisableAccessLog bool `json:"disable-access-log,omitempty"`
@@ -177,7 +199,7 @@ type Configuration struct {

	// Enable json escaping
	// http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format
-	LogFormatEscapeJson bool `json:"log-format-escape-json,omitempty"`
+	LogFormatEscapeJSON bool `json:"log-format-escape-json,omitempty"`

	// Customize upstream log_format
	// http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format
@@ -198,7 +220,7 @@ type Configuration struct {

	// If UseProxyProtocol is enabled ProxyRealIPCIDR defines the default IP/network address
	// of your external load balancer
-	ProxyRealIPCIDR string `json:"proxy-real-ip-cidr,omitempty"`
+	ProxyRealIPCIDR []string `json:"proxy-real-ip-cidr,omitempty"`

	// Sets the name of the configmap that contains the headers to pass to the backend
	ProxySetHeaders string `json:"proxy-set-headers,omitempty"`
@@ -214,6 +236,16 @@ type Configuration struct {
	// http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size
	ServerNameHashBucketSize int `json:"server-name-hash-bucket-size,omitempty"`

+	// Maximum size of the proxy headers hash tables
+	// http://nginx.org/en/docs/hash.html
+	// https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size
+	ProxyHeadersHashMaxSize int `json:"proxy-headers-hash-max-size,omitempty"`
+
+	// Size of the bucket for the proxy headers hash tables
+	// http://nginx.org/en/docs/hash.html
+	// https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size
+	ProxyHeadersHashBucketSize int `json:"proxy-headers-hash-bucket-size,omitempty"`
+
	// Enables or disables emitting nginx version in error messages and in the “Server” response header field.
	// http://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens
	// Default: true
@@ -283,6 +315,10 @@ type Configuration struct {
	// http://nginx.org/en/docs/ngx_core_module.html#worker_processes
	WorkerProcesses string `json:"worker-processes,omitempty"`

+	// Defines a timeout for a graceful shutdown of worker processes
+	// http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout
+	WorkerShutdownTimeout string `json:"worker-shutdown-timeout,omitempty"`
+
	// Defines the load balancing algorithm to use. The default is round-robin
	LoadBalanceAlgorithm string `json:"load-balance,omitempty"`

@@ -293,51 +329,90 @@ type Configuration struct {
	// Sets the maximum size of the variables hash table.
	// http://nginx.org/en/docs/http/ngx_http_map_module.html#variables_hash_max_size
	VariablesHashMaxSize int `json:"variables-hash-max-size,omitempty"`
+
+	// Activates the cache for connections to upstream servers.
+	// The connections parameter sets the maximum number of idle keepalive connections to
+	// upstream servers that are preserved in the cache of each worker process. When this
+	// number is exceeded, the least recently used connections are closed.
+	// http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive
+	// Default: 32
+	UpstreamKeepaliveConnections int `json:"upstream-keepalive-connections,omitempty"`
+
+	// Sets the variable used as the key of the shared memory zone defined by limit_conn_zone.
+	// http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone
+	LimitConnZoneVariable string `json:"limit-conn-zone-variable,omitempty"`
+
+	// Sets the timeout between two successive read or write operations on client or proxied server connections.
+	// If no data is transmitted within this time, the connection is closed.
+	// http://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout
+	ProxyStreamTimeout string `json:"proxy-stream-timeout,omitempty"`
+
+	// Sets the ipv4 addresses on which the server will accept requests.
+	BindAddressIpv4 []string `json:"bind-address-ipv4,omitempty"`
+
+	// Sets the ipv6 addresses on which the server will accept requests.
+ BindAddressIpv6 []string `json:"bind-address-ipv6,omitempty"` + + // Sets the header field for identifying the originating IP address of a client + // Default is X-Forwarded-For + ForwardedForHeader string `json:"forwarded-for-header,omitempty"` } // NewDefault returns the default nginx configuration func NewDefault() Configuration { + defIPCIDR := make([]string, 0) + defIPCIDR = append(defIPCIDR, "0.0.0.0/0") + defBindAddress := make([]string, 0) cfg := Configuration{ AllowBackendServerHeader: false, + AccessLogPath: "/var/log/nginx/access.log", + ErrorLogPath: "/var/log/nginx/error.log", ClientHeaderBufferSize: "1k", + ClientHeaderTimeout: 60, ClientBodyBufferSize: "8k", + ClientBodyTimeout: 60, EnableDynamicTLSRecords: true, EnableUnderscoresInHeaders: false, ErrorLogLevel: errorLevel, + ForwardedForHeader: "X-Forwarded-For", HTTP2MaxFieldSize: "4k", HTTP2MaxHeaderSize: "16k", HSTS: true, - HSTSIncludeSubdomains: true, - HSTSMaxAge: hstsMaxAge, - HSTSPreload: false, - IgnoreInvalidHeaders: true, - GzipTypes: gzipTypes, - KeepAlive: 75, - KeepAliveRequests: 100, - LargeClientHeaderBuffers: "4 8k", - LogFormatEscapeJson: false, - LogFormatStream: logFormatStream, - LogFormatUpstream: logFormatUpstream, - MaxWorkerConnections: 16384, - MapHashBucketSize: 64, - ProxyRealIPCIDR: defIPCIDR, - ServerNameHashMaxSize: 1024, - ShowServerTokens: true, - SSLBufferSize: sslBufferSize, - SSLCiphers: sslCiphers, - SSLECDHCurve: "secp384r1", - SSLProtocols: sslProtocols, - SSLSessionCache: true, - SSLSessionCacheSize: sslSessionCacheSize, - SSLSessionTickets: true, - SSLSessionTimeout: sslSessionTimeout, - UseGzip: true, - WorkerProcesses: strconv.Itoa(runtime.NumCPU()), - LoadBalanceAlgorithm: defaultLoadBalancerAlgorithm, - VtsStatusZoneSize: "10m", - VariablesHashBucketSize: 64, - VariablesHashMaxSize: 2048, - UseHTTP2: true, + HSTSIncludeSubdomains: true, + HSTSMaxAge: hstsMaxAge, + HSTSPreload: false, + IgnoreInvalidHeaders: true, + GzipTypes: gzipTypes, + KeepAlive: 75, + KeepAliveRequests: 100, + LargeClientHeaderBuffers: "4 8k", + LogFormatEscapeJSON: false, + LogFormatStream: logFormatStream, + LogFormatUpstream: logFormatUpstream, + MaxWorkerConnections: 16384, + MapHashBucketSize: 64, + ProxyRealIPCIDR: defIPCIDR, + ServerNameHashMaxSize: 1024, + ProxyHeadersHashMaxSize: 512, + ProxyHeadersHashBucketSize: 64, + ShowServerTokens: true, + SSLBufferSize: sslBufferSize, + SSLCiphers: sslCiphers, + SSLECDHCurve: "secp384r1", + SSLProtocols: sslProtocols, + SSLSessionCache: true, + SSLSessionCacheSize: sslSessionCacheSize, + SSLSessionTickets: true, + SSLSessionTimeout: sslSessionTimeout, + UseGzip: true, + WorkerProcesses: strconv.Itoa(runtime.NumCPU()), + WorkerShutdownTimeout: "10s", + LoadBalanceAlgorithm: defaultLoadBalancerAlgorithm, + VtsStatusZoneSize: "10m", + VariablesHashBucketSize: 64, + VariablesHashMaxSize: 2048, + UseHTTP2: true, + ProxyStreamTimeout: "600s", Backend: defaults.Backend{ ProxyBodySize: bodySize, ProxyConnectTimeout: 5, @@ -346,11 +421,19 @@ func NewDefault() Configuration { ProxyBufferSize: "4k", ProxyCookieDomain: "off", ProxyCookiePath: "off", + ProxyNextUpstream: "error timeout invalid_header http_502 http_503 http_504", + ProxyRequestBuffering: "on", SSLRedirect: true, CustomHTTPErrors: []int{}, WhitelistSourceRange: []string{}, SkipAccessLogURLs: []string{}, + LimitRate: 0, + LimitRateAfter: 0, }, + UpstreamKeepaliveConnections: 32, + LimitConnZoneVariable: defaultLimitConnZoneVariable, + BindAddressIpv4: defBindAddress, + BindAddressIpv6: defBindAddress, } if 
glog.V(5) { @@ -365,7 +448,7 @@ func NewDefault() Configuration { // is enabled. func (cfg Configuration) BuildLogFormatUpstream() string { if cfg.LogFormatUpstream == logFormatUpstream { - return fmt.Sprintf(cfg.LogFormatUpstream, "$the_x_forwarded_for") + return fmt.Sprintf(cfg.LogFormatUpstream, "$the_real_ip") } return cfg.LogFormatUpstream @@ -373,16 +456,31 @@ func (cfg Configuration) BuildLogFormatUpstream() string { // TemplateConfig contains the nginx configuration to render the file nginx.conf type TemplateConfig struct { - ProxySetHeaders map[string]string - MaxOpenFiles int - BacklogSize int - Backends []*ingress.Backend - PassthroughBackends []*ingress.SSLPassthroughBackend - Servers []*ingress.Server - TCPBackends []ingress.L4Service - UDPBackends []ingress.L4Service - HealthzURI string - CustomErrors bool - Cfg Configuration - IsIPV6Enabled bool + ProxySetHeaders map[string]string + AddHeaders map[string]string + MaxOpenFiles int + BacklogSize int + Backends []*ingress.Backend + PassthroughBackends []*ingress.SSLPassthroughBackend + Servers []*ingress.Server + TCPBackends []ingress.L4Service + UDPBackends []ingress.L4Service + HealthzURI string + CustomErrors bool + Cfg Configuration + IsIPV6Enabled bool + IsSSLPassthroughEnabled bool + RedirectServers map[string]string + ListenPorts *ListenPorts +} + +// ListenPorts describe the ports required to run the +// NGINX Ingress controller +type ListenPorts struct { + HTTP int + HTTPS int + Status int + Health int + Default int + SSLProxy int } diff --git a/controllers/nginx/pkg/config/config_test.go b/controllers/nginx/pkg/config/config_test.go index f0a511c8e..2c730d71e 100644 --- a/controllers/nginx/pkg/config/config_test.go +++ b/controllers/nginx/pkg/config/config_test.go @@ -28,8 +28,8 @@ func TestBuildLogFormatUpstream(t *testing.T) { curLogFormat string expected string }{ - {true, logFormatUpstream, fmt.Sprintf(logFormatUpstream, "$the_x_forwarded_for")}, - {false, logFormatUpstream, fmt.Sprintf(logFormatUpstream, "$the_x_forwarded_for")}, + {true, logFormatUpstream, fmt.Sprintf(logFormatUpstream, "$the_real_ip")}, + {false, logFormatUpstream, fmt.Sprintf(logFormatUpstream, "$the_real_ip")}, {true, "my-log-format", "my-log-format"}, {false, "john-log-format", "john-log-format"}, } diff --git a/controllers/nginx/pkg/metric/collector/status.go b/controllers/nginx/pkg/metric/collector/status.go index 361838a31..f5e24221a 100644 --- a/controllers/nginx/pkg/metric/collector/status.go +++ b/controllers/nginx/pkg/metric/collector/status.go @@ -143,8 +143,8 @@ func (bit BoolToFloat64) UnmarshalJSON(data []byte) error { return nil } -func getNginxStatus(ngxHealthPort int, ngxStatusPath string) (*basicStatus, error) { - url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath) +func getNginxStatus(port int, path string) (*basicStatus, error) { + url := fmt.Sprintf("http://localhost:%v%v", port, path) glog.V(3).Infof("start scrapping url: %v", url) data, err := httpBody(url) @@ -174,8 +174,8 @@ func httpBody(url string) ([]byte, error) { return data, nil } -func getNginxVtsMetrics(ngxHealthPort int, ngxVtsPath string) (*vts, error) { - url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath) +func getNginxVtsMetrics(port int, path string) (*vts, error) { + url := fmt.Sprintf("http://localhost:%v%v", port, path) glog.V(3).Infof("start scrapping url: %v", url) data, err := httpBody(url) diff --git a/controllers/nginx/pkg/metric/collector/vts.go b/controllers/nginx/pkg/metric/collector/vts.go index 
d8e2001ed..f57cc4dd1 100644 --- a/controllers/nginx/pkg/metric/collector/vts.go +++ b/controllers/nginx/pkg/metric/collector/vts.go @@ -28,8 +28,8 @@ const ns = "nginx" type ( vtsCollector struct { scrapeChan chan scrapeRequest - ngxHealthPort int - ngxVtsPath string + port int + path string data *vtsData watchNamespace string ingressClass string @@ -39,10 +39,10 @@ type ( bytes *prometheus.Desc cache *prometheus.Desc connections *prometheus.Desc - response *prometheus.Desc - request *prometheus.Desc + responses *prometheus.Desc + requests *prometheus.Desc filterZoneBytes *prometheus.Desc - filterZoneResponse *prometheus.Desc + filterZoneResponses *prometheus.Desc filterZoneCache *prometheus.Desc upstreamBackup *prometheus.Desc upstreamBytes *prometheus.Desc @@ -50,19 +50,19 @@ type ( upstreamFailTimeout *prometheus.Desc upstreamMaxFails *prometheus.Desc upstreamResponses *prometheus.Desc - upstreamRequest *prometheus.Desc + upstreamRequests *prometheus.Desc upstreamResponseMsec *prometheus.Desc upstreamWeight *prometheus.Desc } ) // NewNGINXVTSCollector returns a new prometheus collector for the VTS module -func NewNGINXVTSCollector(watchNamespace, ingressClass string, ngxHealthPort int, ngxVtsPath string) Stopable { +func NewNGINXVTSCollector(watchNamespace, ingressClass string, port int, path string) Stopable { p := vtsCollector{ scrapeChan: make(chan scrapeRequest), - ngxHealthPort: ngxHealthPort, - ngxVtsPath: ngxVtsPath, + port: port, + path: path, watchNamespace: watchNamespace, ingressClass: ingressClass, } @@ -83,12 +83,12 @@ func NewNGINXVTSCollector(watchNamespace, ingressClass string, ngxHealthPort int "Nginx connections count", []string{"ingress_class", "namespace", "type"}, nil), - response: prometheus.NewDesc( + responses: prometheus.NewDesc( prometheus.BuildFQName(ns, "", "responses_total"), "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", []string{"ingress_class", "namespace", "server_zone", "status_code"}, nil), - request: prometheus.NewDesc( + requests: prometheus.NewDesc( prometheus.BuildFQName(ns, "", "requests_total"), "The total number of requested client connections.", []string{"ingress_class", "namespace", "server_zone"}, nil), @@ -98,7 +98,7 @@ func NewNGINXVTSCollector(watchNamespace, ingressClass string, ngxHealthPort int "Nginx bytes count", []string{"ingress_class", "namespace", "server_zone", "country", "direction"}, nil), - filterZoneResponse: prometheus.NewDesc( + filterZoneResponses: prometheus.NewDesc( prometheus.BuildFQName(ns, "", "filterzone_responses_total"), "The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", []string{"ingress_class", "namespace", "server_zone", "country", "status_code"}, nil), @@ -138,7 +138,7 @@ func NewNGINXVTSCollector(watchNamespace, ingressClass string, ngxHealthPort int "The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.", []string{"ingress_class", "namespace", "upstream", "server", "status_code"}, nil), - upstreamRequest: prometheus.NewDesc( + upstreamRequests: prometheus.NewDesc( prometheus.BuildFQName(ns, "", "upstream_requests_total"), "The total number of client connections forwarded to this server.", []string{"ingress_class", "namespace", "upstream", "server"}, nil), @@ -164,20 +164,20 @@ func (p vtsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- p.data.bytes ch <- p.data.cache ch <- p.data.connections - ch <- p.data.request - ch <- p.data.response + ch <- p.data.requests + ch <- p.data.responses ch <- p.data.upstreamBackup ch <- 
p.data.upstreamBytes ch <- p.data.upstreamDown ch <- p.data.upstreamFailTimeout ch <- p.data.upstreamMaxFails - ch <- p.data.upstreamRequest + ch <- p.data.upstreamRequests ch <- p.data.upstreamResponseMsec ch <- p.data.upstreamResponses ch <- p.data.upstreamWeight ch <- p.data.filterZoneBytes ch <- p.data.filterZoneCache - ch <- p.data.filterZoneResponse + ch <- p.data.filterZoneResponses } // Collect implements prometheus.Collector. @@ -201,7 +201,7 @@ func (p vtsCollector) Stop() { // scrapeVts scrape nginx vts metrics func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) { - nginxMetrics, err := getNginxVtsMetrics(p.ngxHealthPort, p.ngxVtsPath) + nginxMetrics, err := getNginxVtsMetrics(p.port, p.path) if err != nil { glog.Warningf("unexpected error obtaining nginx status info: %v", err) return @@ -213,7 +213,7 @@ func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) { for pos, value := range zones { reflectMetrics(&zones[pos].Responses, p.data.upstreamResponses, ch, p.ingressClass, p.watchNamespace, name, value.Server) - ch <- prometheus.MustNewConstMetric(p.data.upstreamRequest, + ch <- prometheus.MustNewConstMetric(p.data.upstreamRequests, prometheus.CounterValue, zones[pos].RequestCounter, p.ingressClass, p.watchNamespace, name, value.Server) ch <- prometheus.MustNewConstMetric(p.data.upstreamDown, prometheus.CounterValue, float64(zones[pos].Down), p.ingressClass, p.watchNamespace, name, value.Server) @@ -235,10 +235,10 @@ func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) { } for name, zone := range nginxMetrics.ServerZones { - reflectMetrics(&zone.Responses, p.data.response, ch, p.ingressClass, p.watchNamespace, name) + reflectMetrics(&zone.Responses, p.data.responses, ch, p.ingressClass, p.watchNamespace, name) reflectMetrics(&zone.Cache, p.data.cache, ch, p.ingressClass, p.watchNamespace, name) - ch <- prometheus.MustNewConstMetric(p.data.request, + ch <- prometheus.MustNewConstMetric(p.data.requests, prometheus.CounterValue, zone.RequestCounter, p.ingressClass, p.watchNamespace, name) ch <- prometheus.MustNewConstMetric(p.data.bytes, prometheus.CounterValue, zone.InBytes, p.ingressClass, p.watchNamespace, name, "in") @@ -248,13 +248,13 @@ func (p vtsCollector) scrapeVts(ch chan<- prometheus.Metric) { for serverZone, countries := range nginxMetrics.FilterZones { for country, zone := range countries { - reflectMetrics(&zone.Responses, p.data.filterZoneResponse, ch, p.ingressClass, p.watchNamespace, serverZone, country) + reflectMetrics(&zone.Responses, p.data.filterZoneResponses, ch, p.ingressClass, p.watchNamespace, serverZone, country) reflectMetrics(&zone.Cache, p.data.filterZoneCache, ch, p.ingressClass, p.watchNamespace, serverZone, country) ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, - prometheus.CounterValue, float64(zone.InBytes), p.ingressClass, p.watchNamespace, serverZone, country, "in") + prometheus.CounterValue, zone.InBytes, p.ingressClass, p.watchNamespace, serverZone, country, "in") ch <- prometheus.MustNewConstMetric(p.data.filterZoneBytes, - prometheus.CounterValue, float64(zone.OutBytes), p.ingressClass, p.watchNamespace, serverZone, country, "out") + prometheus.CounterValue, zone.OutBytes, p.ingressClass, p.watchNamespace, serverZone, country, "out") } } } @@ -266,7 +266,7 @@ func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- promethe tag := val.Type().Field(i).Tag l := append(labels, tag.Get("json")) ch <- prometheus.MustNewConstMetric(desc, - prometheus.CounterValue, 
float64(val.Field(i).Interface().(float64)), + prometheus.CounterValue, val.Field(i).Interface().(float64), l...) } } diff --git a/controllers/nginx/pkg/template/configmap.go b/controllers/nginx/pkg/template/configmap.go index 0597faa7e..7113aef4e 100644 --- a/controllers/nginx/pkg/template/configmap.go +++ b/controllers/nginx/pkg/template/configmap.go @@ -17,6 +17,8 @@ limitations under the License. package template import ( + "fmt" + "net" "strconv" "strings" @@ -24,12 +26,15 @@ import ( "github.com/mitchellh/mapstructure" "k8s.io/ingress/controllers/nginx/pkg/config" + ing_net "k8s.io/ingress/core/pkg/net" ) const ( customHTTPErrors = "custom-http-errors" skipAccessLogUrls = "skip-access-log-urls" whitelistSourceRange = "whitelist-source-range" + proxyRealIPCIDR = "proxy-real-ip-cidr" + bindAddress = "bind-address" ) // ReadConfig obtains the configuration defined by the user merged with the defaults. @@ -45,6 +50,9 @@ func ReadConfig(src map[string]string) config.Configuration { errors := make([]int, 0) skipUrls := make([]string, 0) whitelist := make([]string, 0) + proxylist := make([]string, 0) + bindAddressIpv4List := make([]string, 0) + bindAddressIpv6List := make([]string, 0) if val, ok := conf[customHTTPErrors]; ok { delete(conf, customHTTPErrors) @@ -65,11 +73,35 @@ func ReadConfig(src map[string]string) config.Configuration { delete(conf, whitelistSourceRange) whitelist = append(whitelist, strings.Split(val, ",")...) } + if val, ok := conf[proxyRealIPCIDR]; ok { + delete(conf, proxyRealIPCIDR) + proxylist = append(proxylist, strings.Split(val, ",")...) + } else { + proxylist = append(proxylist, "0.0.0.0/0") + } + if val, ok := conf[bindAddress]; ok { + delete(conf, bindAddress) + for _, i := range strings.Split(val, ",") { + ns := net.ParseIP(i) + if ns != nil { + if ing_net.IsIPV6(ns) { + bindAddressIpv6List = append(bindAddressIpv6List, fmt.Sprintf("[%v]", ns)) + } else { + bindAddressIpv4List = append(bindAddressIpv4List, fmt.Sprintf("%v", ns)) + } + } else { + glog.Warningf("%v is not a valid textual representation of an IP address", i) + } + } + } to := config.NewDefault() to.CustomHTTPErrors = filterErrors(errors) to.SkipAccessLogURLs = skipUrls to.WhitelistSourceRange = whitelist + to.ProxyRealIPCIDR = proxylist + to.BindAddressIpv4 = bindAddressIpv4List + to.BindAddressIpv6 = bindAddressIpv6List config := &mapstructure.DecoderConfig{ Metadata: nil, diff --git a/controllers/nginx/pkg/template/configmap_test.go b/controllers/nginx/pkg/template/configmap_test.go index 130a452a6..59bddbc56 100644 --- a/controllers/nginx/pkg/template/configmap_test.go +++ b/controllers/nginx/pkg/template/configmap_test.go @@ -39,19 +39,30 @@ func TestMergeConfigMapToStruct(t *testing.T) { "skip-access-log-urls": "/log,/demo,/test", "use-proxy-protocol": "true", "disable-access-log": "true", + "access-log-path": "/var/log/test/access.log", + "error-log-path": "/var/log/test/error.log", "use-gzip": "true", "enable-dynamic-tls-records": "false", "gzip-types": "text/html", + "proxy-real-ip-cidr": "1.1.1.1/8,2.2.2.2/24", + "bind-address": "1.1.1.1,2.2.2.2,3.3.3,2001:db8:a0b:12f0::1,3731:54:65fe:2::a7,33:33:33::33::33", + "worker-shutdown-timeout": "99s", } def := config.NewDefault() def.CustomHTTPErrors = []int{300, 400} def.DisableAccessLog = true + def.AccessLogPath = "/var/log/test/access.log" + def.ErrorLogPath = "/var/log/test/error.log" def.SkipAccessLogURLs = []string{"/log", "/demo", "/test"} def.ProxyReadTimeout = 1 def.ProxySendTimeout = 2 def.EnableDynamicTLSRecords = false 
def.UseProxyProtocol = true def.GzipTypes = "text/html" + def.ProxyRealIPCIDR = []string{"1.1.1.1/8", "2.2.2.2/24"} + def.BindAddressIpv4 = []string{"1.1.1.1", "2.2.2.2"} + def.BindAddressIpv6 = []string{"[2001:db8:a0b:12f0::1]", "[3731:54:65fe:2::a7]"} + def.WorkerShutdownTimeout = "99s" to := ReadConfig(conf) if diff := pretty.Compare(to, def); diff != "" { diff --git a/controllers/nginx/pkg/template/template.go b/controllers/nginx/pkg/template/template.go index 024f3ce9d..d42c153a8 100644 --- a/controllers/nginx/pkg/template/template.go +++ b/controllers/nginx/pkg/template/template.go @@ -22,19 +22,21 @@ import ( "encoding/json" "fmt" "net" + "net/url" "os" "os/exec" - "regexp" + "strconv" "strings" text_template "text/template" - "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" - "github.com/pborman/uuid" + + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress/controllers/nginx/pkg/config" "k8s.io/ingress/core/pkg/ingress" + "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" ing_net "k8s.io/ingress/core/pkg/net" "k8s.io/ingress/core/pkg/watch" ) @@ -133,9 +135,11 @@ var ( "buildAuthLocation": buildAuthLocation, "buildAuthResponseHeaders": buildAuthResponseHeaders, "buildProxyPass": buildProxyPass, + "filterRateLimits": filterRateLimits, "buildRateLimitZones": buildRateLimitZones, "buildRateLimit": buildRateLimit, "buildResolvers": buildResolvers, + "buildUpstreamName": buildUpstreamName, "isLocationAllowed": isLocationAllowed, "buildLogFormatUpstream": buildLogFormatUpstream, "buildDenyVariable": buildDenyVariable, @@ -146,10 +150,18 @@ var ( "toUpper": strings.ToUpper, "toLower": strings.ToLower, "formatIP": formatIP, + "buildNextUpstream": buildNextUpstream, + "getIngressInformation": getIngressInformation, + "serverConfig": func(all config.TemplateConfig, server *ingress.Server) interface{} { + return struct{ First, Second interface{} }{all, server} + }, + "buildAuthSignURL": buildAuthSignURL, + "isValidClientBodyBufferSize": isValidClientBodyBufferSize, + "buildForwardedFor": buildForwardedFor, } ) -// fomatIP will wrap IPv6 addresses in [] and return IPv4 addresses +// formatIP will wrap IPv6 addresses in [] and return IPv4 addresses // without modification. If the input cannot be parsed as an IP address // it is returned without modification. 
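A self-contained sketch of the contract described in the comment above; since formatIP itself is defined next, the equivalent logic is inlined here under the assumption that a nil net.ParseIP result means "not an IP" and a nil To4() result means IPv6:

package main

import (
	"fmt"
	"net"
)

// formatAddr mirrors the documented behavior of formatIP: bracket IPv6,
// pass IPv4 and non-IP input through untouched.
func formatAddr(input string) string {
	ip := net.ParseIP(input)
	if ip == nil {
		return input // not an IP address: returned without modification
	}
	if ip.To4() == nil {
		return fmt.Sprintf("[%s]", input) // IPv6, wrapped for nginx syntax
	}
	return input
}

func main() {
	fmt.Println(formatAddr("10.0.0.1"))       // 10.0.0.1
	fmt.Println(formatAddr("2001:db8::1"))    // [2001:db8::1]
	fmt.Println(formatAddr("not-an-address")) // not-an-address
}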
func formatIP(input string) string { @@ -165,7 +177,7 @@ func formatIP(input string) string { // buildResolvers returns the resolvers reading the /etc/resolv.conf file func buildResolvers(a interface{}) string { - // NGINX need IPV6 addresses to be surrounded by brakets + // NGINX need IPV6 addresses to be surrounded by brackets nss := a.([]net.IP) if len(nss) == 0 { return "" @@ -193,7 +205,7 @@ func buildLocation(input interface{}) string { } path := location.Path - if len(location.Redirect.Target) > 0 && location.Redirect.Target != path { + if len(location.Rewrite.Target) > 0 && location.Rewrite.Target != path { if path == slash { return fmt.Sprintf("~* %s", path) } @@ -209,6 +221,7 @@ func buildLocation(input interface{}) string { return path } +// TODO: Needs Unit Tests func buildAuthLocation(input interface{}) string { location, ok := input.(*ingress.Location) if !ok { @@ -248,7 +261,7 @@ func buildAuthResponseHeaders(input interface{}) []string { func buildLogFormatUpstream(input interface{}) string { cfg, ok := input.(config.Configuration) if !ok { - glog.Errorf("error an ingress.buildLogFormatUpstream type but %T was returned", input) + glog.Errorf("error an ingress.buildLogFormatUpstream type but %T was returned", input) } return cfg.BuildLogFormatUpstream() @@ -258,7 +271,7 @@ func buildLogFormatUpstream(input interface{}) string { // (specified through the ingress.kubernetes.io/rewrite-to annotation) // If the annotation ingress.kubernetes.io/add-base-url:"true" is specified it will // add a base tag in the head of the response from the service -func buildProxyPass(b interface{}, loc interface{}) string { +func buildProxyPass(host string, b interface{}, loc interface{}) string { backends := b.([]*ingress.Backend) location, ok := loc.(*ingress.Location) if !ok { @@ -268,59 +281,93 @@ func buildProxyPass(b interface{}, loc interface{}) string { path := location.Path proto := "http" + upstreamName := location.Backend for _, backend := range backends { if backend.Name == location.Backend { if backend.Secure || backend.SSLPassthrough { proto = "https" } + + if isSticky(host, location, backend.SessionAffinity.CookieSessionAffinity.Locations) { + upstreamName = fmt.Sprintf("sticky-%v", upstreamName) + } + break } } // defProxyPass returns the default proxy_pass, just the name of the upstream - defProxyPass := fmt.Sprintf("proxy_pass %s://%s;", proto, location.Backend) + defProxyPass := fmt.Sprintf("proxy_pass %s://%s;", proto, upstreamName) // if the path in the ingress rule is equals to the target: no special rewrite - if path == location.Redirect.Target { + if path == location.Rewrite.Target { return defProxyPass } - if path != slash && !strings.HasSuffix(path, slash) { + if !strings.HasSuffix(path, slash) { path = fmt.Sprintf("%s/", path) } - if len(location.Redirect.Target) > 0 { + if len(location.Rewrite.Target) > 0 { abu := "" - if location.Redirect.AddBaseURL { + if location.Rewrite.AddBaseURL { // path has a slash suffix, so that it can be connected with baseuri directly bPath := fmt.Sprintf("%s%s", path, "$baseuri") - abu = fmt.Sprintf(`subs_filter '' '' r; - subs_filter '' '' r; - `, bPath, bPath) + if len(location.Rewrite.BaseURLScheme) > 0 { + abu = fmt.Sprintf(`subs_filter '' '' r; + subs_filter '' '' r; + `, location.Rewrite.BaseURLScheme, bPath, location.Rewrite.BaseURLScheme, bPath) + } else { + abu = fmt.Sprintf(`subs_filter '' '' r; + subs_filter '' '' r; + `, bPath, bPath) + } } - if location.Redirect.Target == slash { + if location.Rewrite.Target == slash { // 
special case redirect to / // ie /something to / return fmt.Sprintf(` - rewrite %s(.*) /$1 break; - rewrite %s / break; - proxy_pass %s://%s; - %v`, path, location.Path, proto, location.Backend, abu) + rewrite %s(.*) /$1 break; + rewrite %s / break; + proxy_pass %s://%s; + %v`, path, location.Path, proto, upstreamName, abu) } return fmt.Sprintf(` - rewrite %s(.*) %s/$1 break; - proxy_pass %s://%s; - %v`, path, location.Redirect.Target, proto, location.Backend, abu) + rewrite %s(.*) %s/$1 break; + proxy_pass %s://%s; + %v`, path, location.Rewrite.Target, proto, upstreamName, abu) } // default proxy_pass return defProxyPass } +// TODO: Needs Unit Tests +func filterRateLimits(input interface{}) []ratelimit.RateLimit { + ratelimits := []ratelimit.RateLimit{} + found := sets.String{} + + servers, ok := input.([]*ingress.Server) + if !ok { + return ratelimits + } + for _, server := range servers { + for _, loc := range server.Locations { + if loc.RateLimit.ID != "" && !found.Has(loc.RateLimit.ID) { + found.Insert(loc.RateLimit.ID) + ratelimits = append(ratelimits, loc.RateLimit) + } + } + } + return ratelimits +} + +// TODO: Needs Unit Tests // buildRateLimitZones produces an array of limit_conn_zone in order to allow -// rate limiting of request. Each Ingress rule could have up to two zones, one -// for connection limit by IP address and other for limiting request per second +// rate limiting of request. Each Ingress rule could have up to three zones, one +// for connection limit by IP address, one for limiting requests per minute, and +// one for limiting requests per second. func buildRateLimitZones(input interface{}) []string { zones := sets.String{} @@ -331,9 +378,9 @@ func buildRateLimitZones(input interface{}) []string { for _, server := range servers { for _, loc := range server.Locations { - if loc.RateLimit.Connections.Limit > 0 { - zone := fmt.Sprintf("limit_conn_zone $binary_remote_addr zone=%v:%vm;", + zone := fmt.Sprintf("limit_conn_zone $limit_%s zone=%v:%vm;", + loc.RateLimit.ID, loc.RateLimit.Connections.Name, loc.RateLimit.Connections.SharedSize) if !zones.Has(zone) { @@ -341,8 +388,20 @@ func buildRateLimitZones(input interface{}) []string { } } + if loc.RateLimit.RPM.Limit > 0 { + zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/m;", + loc.RateLimit.ID, + loc.RateLimit.RPM.Name, + loc.RateLimit.RPM.SharedSize, + loc.RateLimit.RPM.Limit) + if !zones.Has(zone) { + zones.Insert(zone) + } + } + if loc.RateLimit.RPS.Limit > 0 { - zone := fmt.Sprintf("limit_req_zone $binary_remote_addr zone=%v:%vm rate=%vr/s;", + zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/s;", + loc.RateLimit.ID, loc.RateLimit.RPS.Name, loc.RateLimit.RPS.SharedSize, loc.RateLimit.RPS.Limit) @@ -357,7 +416,7 @@ func buildRateLimitZones(input interface{}) []string { } // buildRateLimit produces an array of limit_req to be used inside the Path of -// Ingress rules. The order: connections by IP first and RPS next. +// Ingress rules. The order: connections by IP first, then RPS, and RPM last. 
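buildRateLimit, defined immediately below, emits the per-location limit_req, limit_conn, and limit_rate directives; the shared-memory zones those directives reference are declared once in the http block by buildRateLimitZones above, keyed by a per-Ingress $limit_<ID> variable. A sketch of the zone/directive pairing the two helpers produce, with illustrative names and limits:

package main

import "fmt"

func main() {
	// Illustrative stand-ins for a parsed rate-limit annotation:
	// ID "abc123", an RPS zone of 5 MB shared memory at 10 requests/second.
	id, name, sizeMB, rps, burst := "abc123", "zone-rps", 5, 10, 50

	// Declared once in the http{} block (buildRateLimitZones):
	fmt.Printf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/s;\n", id, name, sizeMB, rps)
	// Emitted in the matching location{} block (buildRateLimit):
	fmt.Printf("limit_req zone=%v burst=%v nodelay;\n", name, burst)
}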
func buildRateLimit(input interface{}) []string {
	limits := []string{}

@@ -378,6 +437,24 @@ func buildRateLimit(input interface{}) []string {
		limits = append(limits, limit)
	}

+	if loc.RateLimit.RPM.Limit > 0 {
+		limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
+			loc.RateLimit.RPM.Name, loc.RateLimit.RPM.Burst)
+		limits = append(limits, limit)
+	}
+
+	if loc.RateLimit.LimitRateAfter > 0 {
+		limit := fmt.Sprintf("limit_rate_after %vk;",
+			loc.RateLimit.LimitRateAfter)
+		limits = append(limits, limit)
+	}
+
+	if loc.RateLimit.LimitRate > 0 {
+		limit := fmt.Sprintf("limit_rate %vk;",
+			loc.RateLimit.LimitRate)
+		limits = append(limits, limit)
+	}
+
	return limits
}

@@ -392,7 +469,6 @@ func isLocationAllowed(input interface{}) bool {
}

var (
-	nonAlpha = regexp.MustCompile("[^a-zA-Z0-9]+")
	denyPathSlugMap = map[string]string{}
)

@@ -405,8 +481,179 @@ func buildDenyVariable(a interface{}) string {
	l := a.(string)

	if _, ok := denyPathSlugMap[l]; !ok {
-		denyPathSlugMap[l] = uuid.New()
+		denyPathSlugMap[l] = buildRandomUUID()
	}

	return fmt.Sprintf("$deny_%v", denyPathSlugMap[l])
}
+
+// TODO: Needs Unit Tests
+func buildUpstreamName(host string, b interface{}, loc interface{}) string {
+	backends := b.([]*ingress.Backend)
+	location, ok := loc.(*ingress.Location)
+	if !ok {
+		return ""
+	}
+
+	upstreamName := location.Backend
+
+	for _, backend := range backends {
+		if backend.Name == location.Backend {
+			if backend.SessionAffinity.AffinityType == "cookie" &&
+				isSticky(host, location, backend.SessionAffinity.CookieSessionAffinity.Locations) {
+				upstreamName = fmt.Sprintf("sticky-%v", upstreamName)
+			}
+
+			break
+		}
+	}
+
+	return upstreamName
+}
+
+// TODO: Needs Unit Tests
+func isSticky(host string, loc *ingress.Location, stickyLocations map[string][]string) bool {
+	if _, ok := stickyLocations[host]; ok {
+		for _, sl := range stickyLocations[host] {
+			if sl == loc.Path {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func buildNextUpstream(input interface{}) string {
+	nextUpstream, ok := input.(string)
+	if !ok {
+		glog.Errorf("expected a string type but %T was returned", input)
+	}
+
+	parts := strings.Split(nextUpstream, " ")
+
+	nextUpstreamCodes := make([]string, 0, len(parts))
+	for _, v := range parts {
+		if v != "" && v != "non_idempotent" {
+			nextUpstreamCodes = append(nextUpstreamCodes, v)
+		}
+	}
+
+	return strings.Join(nextUpstreamCodes, " ")
+}
+
+func buildAuthSignURL(input interface{}) string {
+	s, ok := input.(string)
+	if !ok {
+		glog.Errorf("expected a string type but %T was returned", input)
+	}
+
+	u, _ := url.Parse(s)
+	q := u.Query()
+	if len(q) == 0 {
+		return fmt.Sprintf("%v?rd=$request_uri", s)
+	}
+
+	return fmt.Sprintf("%v&rd=$request_uri", s)
+}
+
+// buildRandomUUID returns a random string to be used in the template
+func buildRandomUUID() string {
+	s := uuid.New()
+	return strings.Replace(s, "-", "", -1)
+}
+
+func isValidClientBodyBufferSize(input interface{}) bool {
+	s, ok := input.(string)
+	if !ok {
+		glog.Errorf("expected a string type but %T was returned", input)
+		return false
+	}
+
+	if s == "" {
+		return false
+	}
+
+	_, err := strconv.Atoi(s)
+	if err != nil {
+		sLowercase := strings.ToLower(s)
+
+		kCheck := strings.TrimSuffix(sLowercase, "k")
+		_, err := strconv.Atoi(kCheck)
+		if err == nil {
+			return true
+		}
+
+		mCheck := strings.TrimSuffix(sLowercase, "m")
+		_, err = strconv.Atoi(mCheck)
+		if err == nil {
+			return true
+		}
+
+		glog.Errorf("client-body-buffer-size '%v' was provided in an incorrect format, hence it will not be set.", s)
+ return false + } + + return true +} + +type ingressInformation struct { + Namespace string + Rule string + Service string + Annotations map[string]string +} + +func getIngressInformation(i, p interface{}) *ingressInformation { + ing, ok := i.(*extensions.Ingress) + if !ok { + glog.V(3).Infof("expected an Ingress type but %T was returned", i) + return &ingressInformation{} + } + + path, ok := p.(string) + if !ok { + glog.V(3).Infof("expected a string type but %T was returned", p) + return &ingressInformation{} + } + + if ing == nil { + return &ingressInformation{} + } + + info := &ingressInformation{ + Namespace: ing.GetNamespace(), + Rule: ing.GetName(), + Annotations: ing.Annotations, + } + + if ing.Spec.Backend != nil { + info.Service = ing.Spec.Backend.ServiceName + } + + for _, rule := range ing.Spec.Rules { + if rule.HTTP == nil { + continue + } + + for _, rPath := range rule.HTTP.Paths { + if path == rPath.Path { + info.Service = rPath.Backend.ServiceName + return info + } + } + } + + return info +} + +func buildForwardedFor(input interface{}) string { + s, ok := input.(string) + if !ok { + glog.Errorf("expected an string type but %T was returned", input) + } + + ffh := strings.Replace(s, "-", "_", -1) + ffh = strings.ToLower(ffh) + return fmt.Sprintf("$http_%v", ffh) +} diff --git a/controllers/nginx/pkg/template/template_test.go b/controllers/nginx/pkg/template/template_test.go index d0eac50fa..b4b80acc8 100644 --- a/controllers/nginx/pkg/template/template_test.go +++ b/controllers/nginx/pkg/template/template_test.go @@ -18,73 +18,81 @@ package template import ( "encoding/json" + "io/ioutil" "os" "path" "reflect" "strings" "testing" - "io/ioutil" - "k8s.io/ingress/controllers/nginx/pkg/config" "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" + "net" ) var ( // TODO: add tests for secure endpoints tmplFuncTestcases = map[string]struct { - Path string - Target string - Location string - ProxyPass string - AddBaseURL bool + Path string + Target string + Location string + ProxyPass string + AddBaseURL bool + BaseURLScheme string }{ - "invalid redirect / to /": {"/", "/", "/", "proxy_pass http://upstream-name;", false}, + "invalid redirect / to /": {"/", "/", "/", "proxy_pass http://upstream-name;", false, ""}, "redirect / to /jenkins": {"/", "/jenkins", "~* /", ` - rewrite /(.*) /jenkins/$1 break; - proxy_pass http://upstream-name; - `, false}, + rewrite /(.*) /jenkins/$1 break; + proxy_pass http://upstream-name; + `, false, ""}, "redirect /something to /": {"/something", "/", `~* ^/something\/?(?.*)`, ` - rewrite /something/(.*) /$1 break; - rewrite /something / break; - proxy_pass http://upstream-name; - `, false}, + rewrite /something/(.*) /$1 break; + rewrite /something / break; + proxy_pass http://upstream-name; + `, false, ""}, "redirect /end-with-slash/ to /not-root": {"/end-with-slash/", "/not-root", "~* ^/end-with-slash/(?.*)", ` - rewrite /end-with-slash/(.*) /not-root/$1 break; - proxy_pass http://upstream-name; - `, false}, + rewrite /end-with-slash/(.*) /not-root/$1 break; + proxy_pass http://upstream-name; + `, false, ""}, "redirect /something-complex to /not-root": {"/something-complex", "/not-root", `~* ^/something-complex\/?(?.*)`, ` - rewrite /something-complex/(.*) /not-root/$1 break; - proxy_pass http://upstream-name; - `, false}, + rewrite /something-complex/(.*) /not-root/$1 break; + proxy_pass http://upstream-name; + `, false, ""}, "redirect / to /jenkins and rewrite": 
{"/", "/jenkins", "~* /", `
- rewrite /(.*) /jenkins/$1 break;
- proxy_pass http://upstream-name;
- subs_filter '' '' r;
- subs_filter '' '' r;
- `, true},
+ rewrite /(.*) /jenkins/$1 break;
+ proxy_pass http://upstream-name;
+ subs_filter '' '' r;
+ subs_filter '' '' r;
+ `, true, ""},
 "redirect /something to / and rewrite": {"/something", "/", `~* ^/something\/?(?<baseuri>.*)`, `
- rewrite /something/(.*) /$1 break;
- rewrite /something / break;
- proxy_pass http://upstream-name;
- subs_filter '' '' r;
- subs_filter '' '' r;
- `, true},
+ rewrite /something/(.*) /$1 break;
+ rewrite /something / break;
+ proxy_pass http://upstream-name;
+ subs_filter '' '' r;
+ subs_filter '' '' r;
+ `, true, ""},
 "redirect /end-with-slash/ to /not-root and rewrite": {"/end-with-slash/", "/not-root", `~* ^/end-with-slash/(?<baseuri>.*)`, `
- rewrite /end-with-slash/(.*) /not-root/$1 break;
- proxy_pass http://upstream-name;
- subs_filter '' '' r;
- subs_filter '' '' r;
- `, true},
+ rewrite /end-with-slash/(.*) /not-root/$1 break;
+ proxy_pass http://upstream-name;
+ subs_filter '' '' r;
+ subs_filter '' '' r;
+ `, true, ""},
 "redirect /something-complex to /not-root and rewrite": {"/something-complex", "/not-root", `~* ^/something-complex\/?(?<baseuri>.*)`, `
- rewrite /something-complex/(.*) /not-root/$1 break;
- proxy_pass http://upstream-name;
- subs_filter '' '' r;
- subs_filter '' '' r;
- `, true},
+ rewrite /something-complex/(.*) /not-root/$1 break;
+ proxy_pass http://upstream-name;
+ subs_filter '' '' r;
+ subs_filter '' '' r;
+ `, true, ""},
+ "redirect /something to / and rewrite with specific scheme": {"/something", "/", `~* ^/something\/?(?<baseuri>.*)`, `
+ rewrite /something/(.*) /$1 break;
+ rewrite /something / break;
+ proxy_pass http://upstream-name;
+ subs_filter '' '' r;
+ subs_filter '' '' r;
+ `, true, "http"},
 }
 )

@@ -110,8 +118,8 @@ func TestFormatIP(t *testing.T) {
 func TestBuildLocation(t *testing.T) {
 for k, tc := range tmplFuncTestcases {
 loc := &ingress.Location{
- Path: tc.Path,
- Redirect: rewrite.Redirect{Target: tc.Target, AddBaseURL: tc.AddBaseURL},
+ Path: tc.Path,
+ Rewrite: rewrite.Redirect{Target: tc.Target, AddBaseURL: tc.AddBaseURL},
 }

 newLoc := buildLocation(loc)
@@ -124,12 +132,12 @@ func TestBuildProxyPass(t *testing.T) {
 for k, tc := range tmplFuncTestcases {
 loc := &ingress.Location{
- Path: tc.Path,
- Redirect: rewrite.Redirect{Target: tc.Target, AddBaseURL: tc.AddBaseURL},
- Backend: "upstream-name",
+ Path: tc.Path,
+ Rewrite: rewrite.Redirect{Target: tc.Target, AddBaseURL: tc.AddBaseURL, BaseURLScheme: tc.BaseURLScheme},
+ Backend: "upstream-name",
 }

- pp := buildProxyPass([]*ingress.Backend{}, loc)
+ pp := buildProxyPass("", []*ingress.Backend{}, loc)
 if !strings.EqualFold(tc.ProxyPass, pp) {
 t.Errorf("%s: expected \n'%v'\nbut returned \n'%v'", k, tc.ProxyPass, pp)
 }
@@ -168,7 +176,9 @@ func TestTemplateWithData(t *testing.T) {
 if err := json.Unmarshal(data, &dat); err != nil {
 t.Errorf("unexpected error unmarshalling json: %v", err)
 }
-
+ if dat.ListenPorts == nil {
+ dat.ListenPorts = &config.ListenPorts{}
+ }
 tf, err := os.Open(path.Join(pwd, "../../rootfs/etc/nginx/template/nginx.tmpl"))
 if err != nil {
 t.Errorf("unexpected error reading json file: %v", err)
@@ -225,3 +235,140 @@ func TestBuildDenyVariable(t *testing.T) {
 t.Errorf("Expected '%v' but returned '%v'", a, b)
 }
 }
+
+func TestBuildClientBodyBufferSize(t *testing.T) {
+ a := isValidClientBodyBufferSize("1000")
+ if a != true {
+ t.Errorf("Expected '%v' but returned '%v'", true, a)
+ }
+
b := isValidClientBodyBufferSize("1000k") + if b != true { + t.Errorf("Expected '%v' but returned '%v'", true, b) + } + c := isValidClientBodyBufferSize("1000m") + if c != true { + t.Errorf("Expected '%v' but returned '%v'", true, c) + } + d := isValidClientBodyBufferSize("1000km") + if d != false { + t.Errorf("Expected '%v' but returned '%v'", false, d) + } + e := isValidClientBodyBufferSize("1000mk") + if e != false { + t.Errorf("Expected '%v' but returned '%v'", false, e) + } + f := isValidClientBodyBufferSize("1000kk") + if f != false { + t.Errorf("Expected '%v' but returned '%v'", false, f) + } + g := isValidClientBodyBufferSize("1000mm") + if g != false { + t.Errorf("Expected '%v' but returned '%v'", false, g) + } + h := isValidClientBodyBufferSize(nil) + if h != false { + t.Errorf("Expected '%v' but returned '%v'", false, h) + } + i := isValidClientBodyBufferSize("") + if i != false { + t.Errorf("Expected '%v' but returned '%v'", false, i) + } +} + +func TestIsLocationAllowed(t *testing.T) { + loc := ingress.Location{ + Denied: nil, + } + + isAllowed := isLocationAllowed(&loc) + if !isAllowed { + t.Errorf("Expected '%v' but returned '%v'", true, isAllowed) + } +} + +func TestBuildForwardedFor(t *testing.T) { + inputStr := "X-Forwarded-For" + outputStr := buildForwardedFor(inputStr) + + validStr := "$http_x_forwarded_for" + + if outputStr != validStr { + t.Errorf("Expected '%v' but returned '%v'", validStr, outputStr) + } +} + +func TestBuildResolvers(t *testing.T) { + ipOne := net.ParseIP("192.0.0.1") + ipTwo := net.ParseIP("2001:db8:1234:0000:0000:0000:0000:0000") + ipList := []net.IP{ipOne, ipTwo} + + validResolver := "resolver 192.0.0.1 [2001:db8:1234::] valid=30s;" + resolver := buildResolvers(ipList) + + if resolver != validResolver { + t.Errorf("Expected '%v' but returned '%v'", validResolver, resolver) + } +} + +func TestBuildAuthSignURL(t *testing.T) { + urlOne := "http://google.com" + validUrlOne := "http://google.com?rd=$request_uri" + + urlTwo := "http://google.com?cat" + validUrlTwo := "http://google.com?cat&rd=$request_uri" + + authSignURLOne := buildAuthSignURL(urlOne) + if authSignURLOne != validUrlOne { + t.Errorf("Expected '%v' but returned '%v'", validUrlOne, authSignURLOne) + } + + authSignURLTwo := buildAuthSignURL(urlTwo) + if authSignURLTwo != validUrlTwo { + t.Errorf("Expected '%v' but returned '%v'", validUrlTwo, authSignURLTwo) + } +} + +func TestBuildNextUpstream(t *testing.T) { + nextUpstream := "timeout http_500 http_502 non_idempotent" + validNextUpstream := "timeout http_500 http_502" + + buildNextUpstream := buildNextUpstream(nextUpstream) + + if buildNextUpstream != validNextUpstream { + t.Errorf("Expected '%v' but returned '%v'", validNextUpstream, buildNextUpstream) + } +} + +func TestBuildRateLimit(t *testing.T) { + loc := ingress.Location{} + + loc.RateLimit.Connections.Name = "con" + loc.RateLimit.Connections.Limit = 1 + + loc.RateLimit.RPS.Name = "rps" + loc.RateLimit.RPS.Limit = 1 + loc.RateLimit.RPS.Burst = 1 + + loc.RateLimit.RPM.Name = "rpm" + loc.RateLimit.RPM.Limit = 2 + loc.RateLimit.RPM.Burst = 2 + + loc.RateLimit.LimitRateAfter = 1 + loc.RateLimit.LimitRate = 1 + + validLimits := []string{ + "limit_conn con 1;", + "limit_req zone=rps burst=1 nodelay;", + "limit_req zone=rpm burst=2 nodelay;", + "limit_rate_after 1k;", + "limit_rate 1k;", + } + + limits := buildRateLimit(loc) + + for i, limit := range limits { + if limit != validLimits[i] { + t.Errorf("Expected '%v' but returned '%v'", validLimits, limits) + } + } +} diff --git 
a/controllers/nginx/rootfs/Dockerfile b/controllers/nginx/rootfs/Dockerfile index 23b48f017..59f717534 100644 --- a/controllers/nginx/rootfs/Dockerfile +++ b/controllers/nginx/rootfs/Dockerfile @@ -12,17 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM gcr.io/google_containers/nginx-slim-amd64:0.18 +FROM BASEIMAGE + +CROSS_BUILD_COPY qemu-QEMUARCH-static /usr/bin/ RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \ diffutils \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* -RUN curl -sSL -o /sbin/tini https://github.com/krallin/tini/releases/download/v0.14.0/tini-amd64 && \ - chmod +x /sbin/tini +RUN curl -sSL -o /tmp/dumb-init.deb http://ftp.us.debian.org/debian/pool/main/d/dumb-init/dumb-init_1.2.0-1_DUMB_ARCH.deb && \ + dpkg -i /tmp/dumb-init.deb && \ + rm /tmp/dumb-init.deb -ENTRYPOINT ["/sbin/tini", "--"] +ENTRYPOINT ["/usr/bin/dumb-init"] COPY . / diff --git a/controllers/nginx/rootfs/etc/nginx/lua/error_page.lua b/controllers/nginx/rootfs/etc/nginx/lua/error_page.lua deleted file mode 100644 index 2b9178a56..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/error_page.lua +++ /dev/null @@ -1,67 +0,0 @@ -http = require "resty.http" -def_backend = "upstream-default-backend" - -local concat = table.concat -local upstream = require "ngx.upstream" -local get_servers = upstream.get_servers -local get_upstreams = upstream.get_upstreams -local random = math.random -local us = get_upstreams() - -function openURL(original_headers, status) - local httpc = http.new() - - original_headers["X-Code"] = status or "404" - original_headers["X-Format"] = original_headers["Accept"] or "text/html" - - local random_backend = get_destination() - local res, err = httpc:request_uri(random_backend, { - path = "/", - method = "GET", - headers = original_headers, - }) - - if not res then - ngx.log(ngx.ERR, err) - ngx.exit(500) - end - - for k,v in pairs(res.headers) do - ngx.header[k] = v - end - - ngx.status = tonumber(status) - ngx.say(res.body) -end - -function get_destination() - for _, u in ipairs(us) do - if u == def_backend then - local srvs, err = get_servers(u) - local us_table = {} - if not srvs then - return "http://127.0.0.1:8181" - else - for _, srv in ipairs(srvs) do - us_table[srv["name"]] = srv["weight"] - end - end - local destination = random_weight(us_table) - return "http://"..destination - end - end -end - -function random_weight(tbl) - local total = 0 - for k, v in pairs(tbl) do - total = total + v - end - local offset = random(0, total - 1) - for k1, v1 in pairs(tbl) do - if offset < v1 then - return k1 - end - offset = offset - v1 - end -end diff --git a/controllers/nginx/rootfs/etc/nginx/lua/trie.lua b/controllers/nginx/rootfs/etc/nginx/lua/trie.lua deleted file mode 100644 index 5d7441c4e..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/trie.lua +++ /dev/null @@ -1,78 +0,0 @@ --- Simple trie for URLs - -local _M = {} - -local mt = { - __index = _M -} - --- http://lua-users.org/wiki/SplitJoin -local strfind, tinsert, strsub = string.find, table.insert, string.sub -function _M.strsplit(delimiter, text) - local list = {} - local pos = 1 - while 1 do - local first, last = strfind(text, delimiter, pos) - if first then -- found? 
- tinsert(list, strsub(text, pos, first-1)) - pos = last+1 - else - tinsert(list, strsub(text, pos)) - break - end - end - return list -end - -local strsplit = _M.strsplit - -function _M.new() - local t = { } - return setmetatable(t, mt) -end - -function _M.add(t, key, val) - local parts = {} - -- hack for just / - if key == "/" then - parts = { "" } - else - parts = strsplit("/", key) - end - - local l = t - for i = 1, #parts do - local p = parts[i] - if not l[p] then - l[p] = {} - end - l = l[p] - end - l.__value = val -end - -function _M.get(t, key) - local parts = strsplit("/", key) - - local l = t - - -- this may be nil - local val = t.__value - for i = 1, #parts do - local p = parts[i] - if l[p] then - l = l[p] - local v = l.__value - if v then - val = v - end - else - break - end - end - - -- may be nil - return val -end - -return _M diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/.gitignore b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/.gitignore deleted file mode 100644 index 32f4374b2..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -t/servroot/ -t/error.log diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/LICENSE b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/LICENSE deleted file mode 100644 index f108ed742..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2013, James Hurst -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/Makefile b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/Makefile deleted file mode 100644 index 6ddc2e8fe..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -OPENRESTY_PREFIX=/usr/local/openresty - -PREFIX ?= /usr/local -LUA_INCLUDE_DIR ?= $(PREFIX)/include -LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) -INSTALL ?= install -TEST_FILE ?= t - -.PHONY: all test install - -all: ; - -install: all - $(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty - $(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/ - -test: all - PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH TEST_NGINX_NO_SHUFFLE=1 prove -I../test-nginx/lib -r $(TEST_FILE) - util/lua-releng - diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/README.md b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/README.md deleted file mode 100644 index f0135ffe4..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/README.md +++ /dev/null @@ -1,424 +0,0 @@ -# lua-resty-http - -Lua HTTP client cosocket driver for [OpenResty](http://openresty.org/) / [ngx_lua](https://github.com/openresty/lua-nginx-module). - -# Status - -Production ready. - -# Features - -* HTTP 1.0 and 1.1 -* SSL -* Streaming interface to the response body, for predictable memory usage -* Alternative simple interface for singleshot requests without manual connection step -* Chunked and non-chunked transfer encodings -* Keepalive -* Pipelining -* Trailers - - -# API - -* [new](#name) -* [connect](#connect) -* [set_timeout](#set_timeout) -* [ssl_handshake](#ssl_handshake) -* [set_keepalive](#set_keepalive) -* [get_reused_times](#get_reused_times) -* [close](#close) -* [request](#request) -* [request_uri](#request_uri) -* [request_pipeline](#request_pipeline) -* [Response](#response) - * [body_reader](#resbody_reader) - * [read_body](#resread_body) - * [read_trailes](#resread_trailers) -* [Proxy](#proxy) - * [proxy_request](#proxy_request) - * [proxy_response](#proxy_response) -* [Utility](#utility) - * [parse_uri](#parse_uri) - * [get_client_body_reader](#get_client_body_reader) - - -## Synopsis - -```` lua -lua_package_path "/path/to/lua-resty-http/lib/?.lua;;"; - -server { - - - location /simpleinterface { - resolver 8.8.8.8; # use Google's open DNS server for an example - - content_by_lua ' - - -- For simple singleshot requests, use the URI interface. - local http = require "resty.http" - local httpc = http.new() - local res, err = httpc:request_uri("http://example.com/helloworld", { - method = "POST", - body = "a=1&b=2", - headers = { - ["Content-Type"] = "application/x-www-form-urlencoded", - } - }) - - if not res then - ngx.say("failed to request: ", err) - return - end - - -- In this simple form, there is no manual connection step, so the body is read - -- all in one go, including any trailers, and the connection closed or keptalive - -- for you. - - ngx.status = res.status - - for k,v in pairs(res.headers) do - -- - end - - ngx.say(res.body) - '; - } - - - location /genericinterface { - content_by_lua ' - - local http = require "resty.http" - local httpc = http.new() - - -- The generic form gives us more control. We must connect manually. - httpc:set_timeout(500) - httpc:connect("127.0.0.1", 80) - - -- And request using a path, rather than a full URI. 
- local res, err = httpc:request{ - path = "/helloworld", - headers = { - ["Host"] = "example.com", - }, - } - - if not res then - ngx.say("failed to request: ", err) - return - end - - -- Now we can use the body_reader iterator, to stream the body according to our desired chunk size. - local reader = res.body_reader - - repeat - local chunk, err = reader(8192) - if err then - ngx.log(ngx.ERR, err) - break - end - - if chunk then - -- process - end - until not chunk - - local ok, err = httpc:set_keepalive() - if not ok then - ngx.say("failed to set keepalive: ", err) - return - end - '; - } -} -```` - -# Connection - -## new - -`syntax: httpc = http.new()` - -Creates the http object. In case of failures, returns `nil` and a string describing the error. - -## connect - -`syntax: ok, err = httpc:connect(host, port, options_table?)` - -`syntax: ok, err = httpc:connect("unix:/path/to/unix.sock", options_table?)` - -Attempts to connect to the web server. - -Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method. - -An optional Lua table can be specified as the last argument to this method to specify various connect options: - -* `pool` -: Specifies a custom name for the connection pool being used. If omitted, then the connection pool name will be generated from the string template `:` or ``. - -## set_timeout - -`syntax: httpc:set_timeout(time)` - -Sets the timeout (in ms) protection for subsequent operations, including the `connect` method. - -## ssl_handshake - -`syntax: session, err = httpc:ssl_handshake(session, host, verify)` - -Performs an SSL handshake on the TCP connection, only availble in ngx_lua > v0.9.11 - -See docs for [ngx.socket.tcp](https://github.com/openresty/lua-nginx-module#ngxsockettcp) for details. - -## set_keepalive - -`syntax: ok, err = httpc:set_keepalive(max_idle_timeout, pool_size)` - -Attempts to puts the current connection into the ngx_lua cosocket connection pool. - -You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process. - -Only call this method in the place you would have called the `close` method instead. Calling this method will immediately turn the current http object into the `closed` state. Any subsequent operations other than `connect()` on the current objet will return the `closed` error. - -Note that calling this instead of `close` is "safe" in that it will conditionally close depending on the type of request. Specifically, a `1.0` request without `Connection: Keep-Alive` will be closed, as will a `1.1` request with `Connection: Close`. - -In case of success, returns `1`. In case of errors, returns `nil, err`. In the case where the conneciton is conditionally closed as described above, returns `2` and the error string `connection must be closed`. - -## get_reused_times - -`syntax: times, err = httpc:get_reused_times()` - -This method returns the (successfully) reused times for the current connection. In case of error, it returns `nil` and a string describing the error. - -If the current connection does not come from the built-in connection pool, then this method always returns `0`, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool. 
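To make the reuse semantics above concrete, here is a minimal sketch using only the `resty.http` methods this README documents (`new`, `set_timeout`, `connect`, `get_reused_times`, `set_keepalive`); the host, port, and timeout values are placeholders:

```lua
local http = require "resty.http"

local httpc = http.new()
httpc:set_timeout(500)

-- connect() transparently reuses an idle pooled connection when one exists
local ok, err = httpc:connect("127.0.0.1", 80)
if not ok then
    ngx.log(ngx.ERR, "connect failed: ", err)
    return
end

-- A non-zero count means this connection came from the keepalive pool.
local times, terr = httpc:get_reused_times()
if not times then
    ngx.log(ngx.ERR, "could not check reuse: ", terr)
elseif times > 0 then
    ngx.log(ngx.DEBUG, "reusing pooled connection (", times, " prior uses)")
end

-- ... issue requests here ...

-- Return the connection to the pool rather than closing it.
local pooled, kerr = httpc:set_keepalive()
if not pooled then
    ngx.log(ngx.ERR, "set_keepalive failed: ", kerr)
end
```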
- -## close - -`syntax: ok, err = http:close()` - -Closes the current connection and returns the status. - -In case of success, returns `1`. In case of errors, returns `nil` with a string describing the error. - - -# Requesting - -## request - -`syntax: res, err = httpc:request(params)` - -Returns a `res` table or `nil` and an error message. - -The `params` table accepts the following fields: - -* `version` The HTTP version number, currently supporting 1.0 or 1.1. -* `method` The HTTP method string. -* `path` The path string. -* `headers` A table of request headers. -* `body` The request body as a string, or an iterator function (see [get_client_body_reader](#get_client_body_reader)). -* `ssl_verify` Verify SSL cert matches hostname - -When the request is successful, `res` will contain the following fields: - -* `status` The status code. -* `reason` The status reason phrase. -* `headers` A table of headers. Multiple headers with the same field name will be presented as a table of values. -* `has_body` A boolean flag indicating if there is a body to be read. -* `body_reader` An iterator function for reading the body in a streaming fashion. -* `read_body` A method to read the entire body into a string. -* `read_trailers` A method to merge any trailers underneath the headers, after reading the body. - -## request_uri - -`syntax: res, err = httpc:request_uri(uri, params)` - -The simple interface. Options supplied in the `params` table are the same as in the generic interface, and will override components found in the uri itself. - -In this mode, there is no need to connect manually first. The connection is made on your behalf, suiting cases where you simply need to grab a URI without too much hassle. - -Additionally there is no ability to stream the response body in this mode. If the request is successful, `res` will contain the following fields: - -* `status` The status code. -* `headers` A table of headers. -* `body` The response body as a string. - - -## request_pipeline - -`syntax: responses, err = httpc:request_pipeline(params)` - -This method works as per the [request](#request) method above, but `params` is instead a table of param tables. Each request is sent in order, and `responses` is returned as a table of response handles. For example: - -```lua -local responses = httpc:request_pipeline{ - { - path = "/b", - }, - { - path = "/c", - }, - { - path = "/d", - } -} - -for i,r in ipairs(responses) do - if r.status then - ngx.say(r.status) - ngx.say(r:read_body()) - end -end -``` - -Due to the nature of pipelining, no responses are actually read until you attempt to use the response fields (status / headers etc). And since the responses are read off in order, you must read the entire body (and any trailers if you have them), before attempting to read the next response. - -Note this doesn't preclude the use of the streaming response body reader. Responses can still be streamed, so long as the entire body is streamed before attempting to access the next response. - -Be sure to test at least one field (such as status) before trying to use the others, in case a socket read error has occurred. 
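A brief sketch of that defensive pattern, assuming an `httpc` already connected as in the synopsis above; the request paths are placeholders:

```lua
-- Responses are read lazily and strictly in order, so: check status first
-- (nil signals a socket read error), and drain each body before moving on.
local responses, err = httpc:request_pipeline{
    { path = "/b" },
    { path = "/c" },
}
if not responses then
    ngx.log(ngx.ERR, "pipeline failed: ", err)
    return
end

for i, res in ipairs(responses) do
    if res.status then
        ngx.say(res.status)
        ngx.say(res:read_body())
    else
        ngx.log(ngx.ERR, "no usable response for request ", i)
    end
end
```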
- -# Response - -## res.body_reader - -The `body_reader` iterator can be used to stream the response body in chunk sizes of your choosing, as follows: - -````lua -local reader = res.body_reader - -repeat - local chunk, err = reader(8192) - if err then - ngx.log(ngx.ERR, err) - break - end - - if chunk then - -- process - end -until not chunk -```` - -If the reader is called with no arguments, the behaviour depends on the type of connection. If the response is encoded as chunked, then the iterator will return the chunks as they arrive. If not, it will simply return the entire body. - -Note that the size provided is actually a **maximum** size. So in the chunked transfer case, you may get chunks smaller than the size you ask, as a remainder of the actual HTTP chunks. - -## res:read_body - -`syntax: body, err = res:read_body()` - -Reads the entire body into a local string. - - -## res:read_trailers - -`syntax: res:read_trailers()` - -This merges any trailers underneath the `res.headers` table itself. Must be called after reading the body. - - -# Proxy - -There are two convenience methods for when one simply wishes to proxy the current request to the connected upstream, and safely send it downstream to the client, as a reverse proxy. A complete example: - -```lua -local http = require "resty.http" -local httpc = http.new() - -httpc:set_timeout(500) -local ok, err = httpc:connect(HOST, PORT) - -if not ok then - ngx.log(ngx.ERR, err) - return -end - -httpc:set_timeout(2000) -httpc:proxy_response(httpc:proxy_request()) -httpc:set_keepalive() -``` - - -## proxy_request - -`syntax: local res, err = httpc:proxy_request(request_body_chunk_size?)` - -Performs a request using the current client request arguments, effectively proxying to the connected upstream. The request body will be read in a streaming fashion, according to `request_body_chunk_size` (see [documentation on the client body reader](#get_client_body_reader) below). - - -## proxy_response - -`syntax: httpc:proxy_response(res, chunksize?)` - -Sets the current response based on the given `res`. Ensures that hop-by-hop headers are not sent downstream, and will read the response according to `chunksize` (see [documentation on the body reader](#resbody_reader) above). - - -# Utility - -## parse_uri - -`syntax: local scheme, host, port, path = unpack(httpc:parse_uri(uri))` - -This is a convenience function allowing one to more easily use the generic interface, when the input data is a URI. - - -## get_client_body_reader - -`syntax: reader, err = httpc:get_client_body_reader(chunksize?, sock?)` - -Returns an iterator function which can be used to read the downstream client request body in a streaming fashion. You may also specify an optional default chunksize (default is `65536`), or an already established socket in -place of the client request. - -Example: - -```lua -local req_reader = httpc:get_client_body_reader() - -repeat - local chunk, err = req_reader(8192) - if err then - ngx.log(ngx.ERR, err) - break - end - - if chunk then - -- process - end -until not chunk -``` - -This iterator can also be used as the value for the body field in request params, allowing one to stream the request body into a proxied upstream request. - -```lua -local client_body_reader, err = httpc:get_client_body_reader() - -local res, err = httpc:request{ - path = "/helloworld", - body = client_body_reader, -} -``` - -If `sock` is specified, - -# Author - -James Hurst - -Originally started life based on https://github.com/bakins/lua-resty-http-simple. 
Cosocket docs and implementation borrowed from the other lua-resty-* cosocket modules. - - -# Licence - -This module is licensed under the 2-clause BSD license. - -Copyright (c) 2013-2016, James Hurst - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lib/resty/http.lua b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lib/resty/http.lua deleted file mode 100644 index 94f9813ef..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lib/resty/http.lua +++ /dev/null @@ -1,850 +0,0 @@ -local http_headers = require "resty.http_headers" - -local ngx_socket_tcp = ngx.socket.tcp -local ngx_req = ngx.req -local ngx_req_socket = ngx_req.socket -local ngx_req_get_headers = ngx_req.get_headers -local ngx_req_get_method = ngx_req.get_method -local str_gmatch = string.gmatch -local str_lower = string.lower -local str_upper = string.upper -local str_find = string.find -local str_sub = string.sub -local str_gsub = string.gsub -local tbl_concat = table.concat -local tbl_insert = table.insert -local ngx_encode_args = ngx.encode_args -local ngx_re_match = ngx.re.match -local ngx_re_gsub = ngx.re.gsub -local ngx_log = ngx.log -local ngx_DEBUG = ngx.DEBUG -local ngx_ERR = ngx.ERR -local ngx_NOTICE = ngx.NOTICE -local ngx_var = ngx.var -local co_yield = coroutine.yield -local co_create = coroutine.create -local co_status = coroutine.status -local co_resume = coroutine.resume - - --- http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1 -local HOP_BY_HOP_HEADERS = { - ["connection"] = true, - ["keep-alive"] = true, - ["proxy-authenticate"] = true, - ["proxy-authorization"] = true, - ["te"] = true, - ["trailers"] = true, - ["transfer-encoding"] = true, - ["upgrade"] = true, - ["content-length"] = true, -- Not strictly hop-by-hop, but Nginx will deal - -- with this (may send chunked for example). -} - - --- Reimplemented coroutine.wrap, returning "nil, err" if the coroutine cannot --- be resumed. This protects user code from inifite loops when doing things like --- repeat --- local chunk, err = res.body_reader() --- if chunk then -- <-- This could be a string msg in the core wrap function. --- ... 
--- end --- until not chunk -local co_wrap = function(func) - local co = co_create(func) - if not co then - return nil, "could not create coroutine" - else - return function(...) - if co_status(co) == "suspended" then - return select(2, co_resume(co, ...)) - else - return nil, "can't resume a " .. co_status(co) .. " coroutine" - end - end - end -end - - -local _M = { - _VERSION = '0.09', -} -_M._USER_AGENT = "lua-resty-http/" .. _M._VERSION .. " (Lua) ngx_lua/" .. ngx.config.ngx_lua_version - -local mt = { __index = _M } - - -local HTTP = { - [1.0] = " HTTP/1.0\r\n", - [1.1] = " HTTP/1.1\r\n", -} - -local DEFAULT_PARAMS = { - method = "GET", - path = "/", - version = 1.1, -} - - -function _M.new(self) - local sock, err = ngx_socket_tcp() - if not sock then - return nil, err - end - return setmetatable({ sock = sock, keepalive = true }, mt) -end - - -function _M.set_timeout(self, timeout) - local sock = self.sock - if not sock then - return nil, "not initialized" - end - - return sock:settimeout(timeout) -end - - -function _M.ssl_handshake(self, ...) - local sock = self.sock - if not sock then - return nil, "not initialized" - end - - self.ssl = true - - return sock:sslhandshake(...) -end - - -function _M.connect(self, ...) - local sock = self.sock - if not sock then - return nil, "not initialized" - end - - self.host = select(1, ...) - self.port = select(2, ...) - - -- If port is not a number, this is likely a unix domain socket connection. - if type(self.port) ~= "number" then - self.port = nil - end - - self.keepalive = true - - return sock:connect(...) -end - - -function _M.set_keepalive(self, ...) - local sock = self.sock - if not sock then - return nil, "not initialized" - end - - if self.keepalive == true then - return sock:setkeepalive(...) - else - -- The server said we must close the connection, so we cannot setkeepalive. - -- If close() succeeds we return 2 instead of 1, to differentiate between - -- a normal setkeepalive() failure and an intentional close(). - local res, err = sock:close() - if res then - return 2, "connection must be closed" - else - return res, err - end - end -end - - -function _M.get_reused_times(self) - local sock = self.sock - if not sock then - return nil, "not initialized" - end - - return sock:getreusedtimes() -end - - -function _M.close(self) - local sock = self.sock - if not sock then - return nil, "not initialized" - end - - return sock:close() -end - - -local function _should_receive_body(method, code) - if method == "HEAD" then return nil end - if code == 204 or code == 304 then return nil end - if code >= 100 and code < 200 then return nil end - return true -end - - -function _M.parse_uri(self, uri) - local m, err = ngx_re_match(uri, [[^(http[s]?)://([^:/]+)(?::(\d+))?(.*)]], - "jo") - - if not m then - if err then - return nil, "failed to match the uri: " .. uri .. ", " .. err - end - - return nil, "bad uri: " .. uri - else - if m[3] then - m[3] = tonumber(m[3]) - else - if m[1] == "https" then - m[3] = 443 - else - m[3] = 80 - end - end - if not m[4] or "" == m[4] then m[4] = "/" end - return m, nil - end -end - - -local function _format_request(params) - local version = params.version - local headers = params.headers or {} - - local query = params.query or "" - if query then - if type(query) == "table" then - query = "?" .. ngx_encode_args(query) - end - end - - -- Initialize request - local req = { - str_upper(params.method), - " ", - params.path, - query, - HTTP[version], - -- Pre-allocate slots for minimum headers and carriage return. 
- true, - true, - true, - } - local c = 6 -- req table index it's faster to do this inline vs table.insert - - -- Append headers - for key, values in pairs(headers) do - if type(values) ~= "table" then - values = {values} - end - - key = tostring(key) - for _, value in pairs(values) do - req[c] = key .. ": " .. tostring(value) .. "\r\n" - c = c + 1 - end - end - - -- Close headers - req[c] = "\r\n" - - return tbl_concat(req) -end - - -local function _receive_status(sock) - local line, err = sock:receive("*l") - if not line then - return nil, nil, nil, err - end - - return tonumber(str_sub(line, 10, 12)), tonumber(str_sub(line, 6, 8)), str_sub(line, 14) -end - - - -local function _receive_headers(sock) - local headers = http_headers.new() - - repeat - local line, err = sock:receive("*l") - if not line then - return nil, err - end - - for key, val in str_gmatch(line, "([^:%s]+):%s*(.+)") do - if headers[key] then - if type(headers[key]) ~= "table" then - headers[key] = { headers[key] } - end - tbl_insert(headers[key], tostring(val)) - else - headers[key] = tostring(val) - end - end - until str_find(line, "^%s*$") - - return headers, nil -end - - -local function _chunked_body_reader(sock, default_chunk_size) - return co_wrap(function(max_chunk_size) - local max_chunk_size = max_chunk_size or default_chunk_size - local remaining = 0 - local length - - repeat - -- If we still have data on this chunk - if max_chunk_size and remaining > 0 then - - if remaining > max_chunk_size then - -- Consume up to max_chunk_size - length = max_chunk_size - remaining = remaining - max_chunk_size - else - -- Consume all remaining - length = remaining - remaining = 0 - end - else -- This is a fresh chunk - - -- Receive the chunk size - local str, err = sock:receive("*l") - if not str then - co_yield(nil, err) - end - - length = tonumber(str, 16) - - if not length then - co_yield(nil, "unable to read chunksize") - end - - if max_chunk_size and length > max_chunk_size then - -- Consume up to max_chunk_size - remaining = length - max_chunk_size - length = max_chunk_size - end - end - - if length > 0 then - local str, err = sock:receive(length) - if not str then - co_yield(nil, err) - end - - max_chunk_size = co_yield(str) or default_chunk_size - - -- If we're finished with this chunk, read the carriage return. - if remaining == 0 then - sock:receive(2) -- read \r\n - end - else - -- Read the last (zero length) chunk's carriage return - sock:receive(2) -- read \r\n - end - - until length == 0 - end) -end - - -local function _body_reader(sock, content_length, default_chunk_size) - return co_wrap(function(max_chunk_size) - local max_chunk_size = max_chunk_size or default_chunk_size - - if not content_length and max_chunk_size then - -- We have no length, but wish to stream. - -- HTTP 1.0 with no length will close connection, so read chunks to the end. - repeat - local str, err, partial = sock:receive(max_chunk_size) - if not str and err == "closed" then - max_chunk_size = tonumber(co_yield(partial, err) or default_chunk_size) - end - - max_chunk_size = tonumber(co_yield(str) or default_chunk_size) - if max_chunk_size and max_chunk_size < 0 then max_chunk_size = nil end - - if not max_chunk_size then - ngx_log(ngx_ERR, "Buffer size not specified, bailing") - break - end - until not str - - elseif not content_length then - -- We have no length but don't wish to stream. - -- HTTP 1.0 with no length will close connection, so read to the end. 
- co_yield(sock:receive("*a")) - - elseif not max_chunk_size then - -- We have a length and potentially keep-alive, but want everything. - co_yield(sock:receive(content_length)) - - else - -- We have a length and potentially a keep-alive, and wish to stream - -- the response. - local received = 0 - repeat - local length = max_chunk_size - if received + length > content_length then - length = content_length - received - end - - if length > 0 then - local str, err = sock:receive(length) - if not str then - max_chunk_size = tonumber(co_yield(nil, err) or default_chunk_size) - end - received = received + length - - max_chunk_size = tonumber(co_yield(str) or default_chunk_size) - if max_chunk_size and max_chunk_size < 0 then max_chunk_size = nil end - - if not max_chunk_size then - ngx_log(ngx_ERR, "Buffer size not specified, bailing") - break - end - end - - until length == 0 - end - end) -end - - -local function _no_body_reader() - return nil -end - - -local function _read_body(res) - local reader = res.body_reader - - if not reader then - -- Most likely HEAD or 304 etc. - return nil, "no body to be read" - end - - local chunks = {} - local c = 1 - - local chunk, err - repeat - chunk, err = reader() - - if err then - return nil, err, tbl_concat(chunks) -- Return any data so far. - end - if chunk then - chunks[c] = chunk - c = c + 1 - end - until not chunk - - return tbl_concat(chunks) -end - - -local function _trailer_reader(sock) - return co_wrap(function() - co_yield(_receive_headers(sock)) - end) -end - - -local function _read_trailers(res) - local reader = res.trailer_reader - if not reader then - return nil, "no trailers" - end - - local trailers = reader() - setmetatable(res.headers, { __index = trailers }) -end - - -local function _send_body(sock, body) - if type(body) == 'function' then - repeat - local chunk, err, partial = body() - - if chunk then - local ok,err = sock:send(chunk) - - if not ok then - return nil, err - end - elseif err ~= nil then - return nil, err, partial - end - - until chunk == nil - elseif body ~= nil then - local bytes, err = sock:send(body) - - if not bytes then - return nil, err - end - end - return true, nil -end - - -local function _handle_continue(sock, body) - local status, version, reason, err = _receive_status(sock) - if not status then - return nil, nil, err - end - - -- Only send body if we receive a 100 Continue - if status == 100 then - local ok, err = sock:receive("*l") -- Read carriage return - if not ok then - return nil, nil, err - end - _send_body(sock, body) - end - return status, version, err -end - - -function _M.send_request(self, params) - -- Apply defaults - setmetatable(params, { __index = DEFAULT_PARAMS }) - - local sock = self.sock - local body = params.body - local headers = http_headers.new() - - local params_headers = params.headers - if params_headers then - -- We assign one by one so that the metatable can handle case insensitivity - -- for us. You can blame the spec for this inefficiency. - for k,v in pairs(params_headers) do - headers[k] = v - end - end - - -- Ensure minimal headers are set - if type(body) == 'string' and not headers["Content-Length"] then - headers["Content-Length"] = #body - end - if not headers["Host"] then - if (str_sub(self.host, 1, 5) == "unix:") then - return nil, "Unable to generate a useful Host header for a unix domain socket. Please provide one." - end - -- If we have a port (i.e. not connected to a unix domain socket), and this - -- port is non-standard, append it to the Host heaer. 
- if self.port then - if self.ssl and self.port ~= 443 then - headers["Host"] = self.host .. ":" .. self.port - elseif not self.ssl and self.port ~= 80 then - headers["Host"] = self.host .. ":" .. self.port - else - headers["Host"] = self.host - end - else - headers["Host"] = self.host - end - end - if not headers["User-Agent"] then - headers["User-Agent"] = _M._USER_AGENT - end - if params.version == 1.0 and not headers["Connection"] then - headers["Connection"] = "Keep-Alive" - end - - params.headers = headers - - -- Format and send request - local req = _format_request(params) - ngx_log(ngx_DEBUG, "\n", req) - local bytes, err = sock:send(req) - - if not bytes then - return nil, err - end - - -- Send the request body, unless we expect: continue, in which case - -- we handle this as part of reading the response. - if headers["Expect"] ~= "100-continue" then - local ok, err, partial = _send_body(sock, body) - if not ok then - return nil, err, partial - end - end - - return true -end - - -function _M.read_response(self, params) - local sock = self.sock - - local status, version, reason, err - - -- If we expect: continue, we need to handle this, sending the body if allowed. - -- If we don't get 100 back, then status is the actual status. - if params.headers["Expect"] == "100-continue" then - local _status, _version, _err = _handle_continue(sock, params.body) - if not _status then - return nil, _err - elseif _status ~= 100 then - status, version, err = _status, _version, _err - end - end - - -- Just read the status as normal. - if not status then - status, version, reason, err = _receive_status(sock) - if not status then - return nil, err - end - end - - - local res_headers, err = _receive_headers(sock) - if not res_headers then - return nil, err - end - - -- keepalive is true by default. Determine if this is correct or not. 
- local ok, connection = pcall(str_lower, res_headers["Connection"]) - if ok then - if (version == 1.1 and connection == "close") or - (version == 1.0 and connection ~= "keep-alive") then - self.keepalive = false - end - else - -- no connection header - if version == 1.0 then - self.keepalive = false - end - end - - local body_reader = _no_body_reader - local trailer_reader, err = nil, nil - local has_body = false - - -- Receive the body_reader - if _should_receive_body(params.method, status) then - local ok, encoding = pcall(str_lower, res_headers["Transfer-Encoding"]) - if ok and version == 1.1 and encoding == "chunked" then - body_reader, err = _chunked_body_reader(sock) - has_body = true - else - - local ok, length = pcall(tonumber, res_headers["Content-Length"]) - if ok then - body_reader, err = _body_reader(sock, length) - has_body = true - end - end - end - - if res_headers["Trailer"] then - trailer_reader, err = _trailer_reader(sock) - end - - if err then - return nil, err - else - return { - status = status, - reason = reason, - headers = res_headers, - has_body = has_body, - body_reader = body_reader, - read_body = _read_body, - trailer_reader = trailer_reader, - read_trailers = _read_trailers, - } - end -end - - -function _M.request(self, params) - local res, err = self:send_request(params) - if not res then - return res, err - else - return self:read_response(params) - end -end - - -function _M.request_pipeline(self, requests) - for i, params in ipairs(requests) do - if params.headers and params.headers["Expect"] == "100-continue" then - return nil, "Cannot pipeline request specifying Expect: 100-continue" - end - - local res, err = self:send_request(params) - if not res then - return res, err - end - end - - local responses = {} - for i, params in ipairs(requests) do - responses[i] = setmetatable({ - params = params, - response_read = false, - }, { - -- Read each actual response lazily, at the point the user tries - -- to access any of the fields. 
- __index = function(t, k) - local res, err - if t.response_read == false then - res, err = _M.read_response(self, t.params) - t.response_read = true - - if not res then - ngx_log(ngx_ERR, err) - else - for rk, rv in pairs(res) do - t[rk] = rv - end - end - end - return rawget(t, k) - end, - }) - end - return responses -end - - -function _M.request_uri(self, uri, params) - if not params then params = {} end - - local parsed_uri, err = self:parse_uri(uri) - if not parsed_uri then - return nil, err - end - - local scheme, host, port, path = unpack(parsed_uri) - if not params.path then params.path = path end - - local c, err = self:connect(host, port) - if not c then - return nil, err - end - - if scheme == "https" then - local verify = true - if params.ssl_verify == false then - verify = false - end - local ok, err = self:ssl_handshake(nil, host, verify) - if not ok then - return nil, err - end - end - - local res, err = self:request(params) - if not res then - return nil, err - end - - local body, err = res:read_body() - if not body then - return nil, err - end - - res.body = body - - local ok, err = self:set_keepalive() - if not ok then - ngx_log(ngx_ERR, err) - end - - return res, nil -end - - -function _M.get_client_body_reader(self, chunksize, sock) - local chunksize = chunksize or 65536 - if not sock then - local ok, err - ok, sock, err = pcall(ngx_req_socket) - - if not ok then - return nil, sock -- pcall err - end - - if not sock then - if err == "no body" then - return nil - else - return nil, err - end - end - end - - local headers = ngx_req_get_headers() - local length = headers.content_length - local encoding = headers.transfer_encoding - if length then - return _body_reader(sock, tonumber(length), chunksize) - elseif encoding and str_lower(encoding) == 'chunked' then - -- Not yet supported by ngx_lua but should just work... - return _chunked_body_reader(sock, chunksize) - else - return nil - end -end - - -function _M.proxy_request(self, chunksize) - return self:request{ - method = ngx_req_get_method(), - path = ngx_re_gsub(ngx_var.uri, "\\s", "%20", "jo") .. ngx_var.is_args .. (ngx_var.query_string or ""), - body = self:get_client_body_reader(chunksize), - headers = ngx_req_get_headers(), - } -end - - -function _M.proxy_response(self, response, chunksize) - if not response then - ngx_log(ngx_ERR, "no response provided") - return - end - - ngx.status = response.status - - -- Filter out hop-by-hop headeres - for k,v in pairs(response.headers) do - if not HOP_BY_HOP_HEADERS[str_lower(k)] then - ngx.header[k] = v - end - end - - local reader = response.body_reader - repeat - local chunk, err = reader(chunksize) - if err then - ngx_log(ngx_ERR, err) - break - end - - if chunk then - local res, err = ngx.print(chunk) - if not res then - ngx_log(ngx_ERR, err) - break - end - end - until not chunk -end - - -return _M diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lib/resty/http_headers.lua b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lib/resty/http_headers.lua deleted file mode 100644 index 24b53b533..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lib/resty/http_headers.lua +++ /dev/null @@ -1,62 +0,0 @@ -local rawget, rawset, setmetatable = - rawget, rawset, setmetatable - -local str_gsub = string.gsub -local str_lower = string.lower - - -local _M = { - _VERSION = '0.01', -} - - --- Returns an empty headers table with internalised case normalisation. 
--- Supports the same cases as in ngx_lua: --- --- headers.content_length --- headers["content-length"] --- headers["Content-Length"] -function _M.new(self) - local mt = { - normalised = {}, - } - - - mt.__index = function(t, k) - local k_hyphened = str_gsub(k, "_", "-") - local matched = rawget(t, k) - if matched then - return matched - else - local k_normalised = str_lower(k_hyphened) - return rawget(t, mt.normalised[k_normalised]) - end - end - - - -- First check the normalised table. If there's no match (first time) add an entry for - -- our current case in the normalised table. This is to preserve the human (prettier) case - -- instead of outputting lowercased header names. - -- - -- If there's a match, we're being updated, just with a different case for the key. We use - -- the normalised table to give us the original key, and perorm a rawset(). - mt.__newindex = function(t, k, v) - -- we support underscore syntax, so always hyphenate. - local k_hyphened = str_gsub(k, "_", "-") - - -- lowercase hyphenated is "normalised" - local k_normalised = str_lower(k_hyphened) - - if not mt.normalised[k_normalised] then - mt.normalised[k_normalised] = k_hyphened - rawset(t, k_hyphened, v) - else - rawset(t, mt.normalised[k_normalised], v) - end - end - - return setmetatable({}, mt) -end - - -return _M diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lua-resty-http-0.09-0.rockspec b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lua-resty-http-0.09-0.rockspec deleted file mode 100644 index 589c0ca29..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/lua-resty-http-0.09-0.rockspec +++ /dev/null @@ -1,22 +0,0 @@ -package = "lua-resty-http" -version = "0.09-0" -source = { - url = "git://github.com/pintsized/lua-resty-http", - tag = "v0.09" -} -description = { - summary = "Lua HTTP client cosocket driver for OpenResty / ngx_lua.", - homepage = "https://github.com/pintsized/lua-resty-http", - license = "2-clause BSD", - maintainer = "James Hurst " -} -dependencies = { - "lua >= 5.1" -} -build = { - type = "builtin", - modules = { - ["resty.http"] = "lib/resty/http.lua", - ["resty.http_headers"] = "lib/resty/http_headers.lua" - } -} diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/01-basic.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/01-basic.t deleted file mode 100644 index 79d4ee7e0..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/01-basic.t +++ /dev/null @@ -1,233 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4) + 1; - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Simple default get. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - ngx.status = res.status - ngx.print(res:read_body()) - - httpc:close() - '; - } - location = /b { - echo "OK"; - } ---- request -GET /a ---- response_body -OK ---- no_error_log -[error] -[warn] - - -=== TEST 2: HTTP 1.0 ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - version = 1.0, - path = "/b" - } - - ngx.status = res.status - ngx.print(res:read_body()) - - httpc:close() - '; - } - location = /b { - echo "OK"; - } ---- request -GET /a ---- response_body -OK ---- no_error_log -[error] -[warn] - - -=== TEST 3: Status code and reason phrase ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - ngx.status = res.status - ngx.say(res.reason) - ngx.print(res:read_body()) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.status = 404 - ngx.say("OK") - '; - } ---- request -GET /a ---- response_body -Not Found -OK ---- error_code: 404 ---- no_error_log -[error] -[warn] - - -=== TEST 4: Response headers ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - ngx.status = res.status - ngx.say(res.headers["X-Test"]) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.header["X-Test"] = "x-value" - ngx.say("OK") - '; - } ---- request -GET /a ---- response_body -x-value ---- no_error_log -[error] -[warn] - - -=== TEST 5: Query ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - query = { - a = 1, - b = 2, - }, - path = "/b" - } - - ngx.status = res.status - - for k,v in pairs(res.headers) do - ngx.header[k] = v - end - - ngx.print(res:read_body()) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - for k,v in pairs(ngx.req.get_uri_args()) do - ngx.header["X-Header-" .. string.upper(k)] = v - end - '; - } ---- request -GET /a ---- response_headers -X-Header-A: 1 -X-Header-B: 2 ---- no_error_log -[error] -[warn] - - -=== TEST 7: HEAD has no body. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - method = "HEAD", - path = "/b" - } - - local body = res:read_body() - - if body then - ngx.print(body) - end - httpc:close() - '; - } - location = /b { - echo "OK"; - } ---- request -GET /a ---- response_body ---- no_error_log -[error] -[warn] - diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/02-chunked.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/02-chunked.t deleted file mode 100644 index f3dda676f..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/02-chunked.t +++ /dev/null @@ -1,158 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Non chunked. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - local body = res:read_body() - - ngx.say(#body) - httpc:close() - '; - } - location = /b { - chunked_transfer_encoding off; - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -32768 ---- no_error_log -[error] -[warn] - - -=== TEST 2: Chunked. The number of chunks received when no max size is given proves the response was in fact chunked. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - local chunks = {} - local c = 1 - repeat - local chunk, err = res.body_reader() - if chunk then - chunks[c] = chunk - c = c + 1 - end - until not chunk - - local body = table.concat(chunks) - - ngx.say(#body) - ngx.say(#chunks) - httpc:close() - '; - } - location = /b { - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -65536 -2 ---- no_error_log -[error] -[warn] - - -=== TEST 3: Chunked using read_body method. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - local body = res:read_body() - - ngx.say(#body) - httpc:close() - '; - } - location = /b { - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -65536 ---- no_error_log -[error] -[warn] diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/03-requestbody.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/03-requestbody.t deleted file mode 100644 index a17d7dfd2..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/03-requestbody.t +++ /dev/null @@ -1,185 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: POST form-urlencoded ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - body = "a=1&b=2&c=3", - path = "/b", - headers = { - ["Content-Type"] = "application/x-www-form-urlencoded", - } - } - - ngx.say(res:read_body()) - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.req.read_body() - local args = ngx.req.get_post_args() - ngx.say("a: ", args.a) - ngx.say("b: ", args.b) - ngx.print("c: ", args.c) - '; - } ---- request -GET /a ---- response_body -a: 1 -b: 2 -c: 3 ---- no_error_log -[error] -[warn] - - -=== TEST 2: POST form-urlencoded 1.0 ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - method = "POST", - body = "a=1&b=2&c=3", - path = "/b", - headers = { - ["Content-Type"] = "application/x-www-form-urlencoded", - }, - version = 1.0, - } - - ngx.say(res:read_body()) - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.req.read_body() - local args = ngx.req.get_post_args() - ngx.say(ngx.req.get_method()) - ngx.say("a: ", args.a) - ngx.say("b: ", args.b) - ngx.print("c: ", args.c) - '; - } ---- request -GET /a ---- response_body -POST -a: 1 -b: 2 -c: 3 ---- no_error_log -[error] -[warn] - - -=== TEST 3: 100 Continue does not end requset ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - body = "a=1&b=2&c=3", - path = "/b", - headers = { - ["Expect"] = "100-continue", - ["Content-Type"] = "application/x-www-form-urlencoded", - } - } - ngx.say(res.status) - ngx.say(res:read_body()) - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.req.read_body() - local args = ngx.req.get_post_args() - ngx.say("a: ", args.a) - ngx.say("b: ", args.b) - ngx.print("c: ", args.c) - '; - } ---- request -GET /a 
---- response_body -200 -a: 1 -b: 2 -c: 3 ---- no_error_log -[error] -[warn] - -=== TEST 4: Return non-100 status to user ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - headers = { - ["Expect"] = "100-continue", - ["Content-Type"] = "application/x-www-form-urlencoded", - } - } - if not res then - ngx.say(err) - end - ngx.say(res.status) - ngx.say(res:read_body()) - httpc:close() - '; - } - location = /b { - return 417 "Expectation Failed"; - } ---- request -GET /a ---- response_body -417 -Expectation Failed ---- no_error_log -[error] -[warn] - diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/04-trailers.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/04-trailers.t deleted file mode 100644 index 8c4c17624..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/04-trailers.t +++ /dev/null @@ -1,151 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Trailers. Check Content-MD5 generated after the body is sent matches up. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - headers = { - ["TE"] = "trailers", - } - } - - local body = res:read_body() - local hash = ngx.md5(body) - res:read_trailers() - - if res.headers["Content-MD5"] == hash then - ngx.say("OK") - else - ngx.say(res.headers["Content-MD5"]) - end - '; - } - location = /b { - content_by_lua ' - -- We use the raw socket to compose a response, since OpenResty - -- doesnt support trailers natively. - - ngx.req.read_body() - local sock, err = ngx.req.socket(true) - if not sock then - ngx.say(err) - end - - local res = {} - table.insert(res, "HTTP/1.1 200 OK") - table.insert(res, "Date: " .. ngx.http_time(ngx.time())) - table.insert(res, "Transfer-Encoding: chunked") - table.insert(res, "Trailer: Content-MD5") - table.insert(res, "") - - local body = "Hello, World" - - table.insert(res, string.format("%x", #body)) - table.insert(res, body) - table.insert(res, "0") - table.insert(res, "") - - table.insert(res, "Content-MD5: " .. ngx.md5(body)) - - table.insert(res, "") - table.insert(res, "") - sock:send(table.concat(res, "\\r\\n")) - '; - } ---- request -GET /a ---- response_body -OK ---- no_error_log -[error] -[warn] - - -=== TEST 2: Advertised trailer does not exist, handled gracefully. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - headers = { - ["TE"] = "trailers", - } - } - - local body = res:read_body() - local hash = ngx.md5(body) - res:read_trailers() - - ngx.say("OK") - httpc:close() - '; - } - location = /b { - content_by_lua ' - -- We use the raw socket to compose a response, since OpenResty - -- doesnt support trailers natively. 
- - ngx.req.read_body() - local sock, err = ngx.req.socket(true) - if not sock then - ngx.say(err) - end - - local res = {} - table.insert(res, "HTTP/1.1 200 OK") - table.insert(res, "Date: " .. ngx.http_time(ngx.time())) - table.insert(res, "Transfer-Encoding: chunked") - table.insert(res, "Trailer: Content-MD5") - table.insert(res, "") - - local body = "Hello, World" - - table.insert(res, string.format("%x", #body)) - table.insert(res, body) - table.insert(res, "0") - - table.insert(res, "") - table.insert(res, "") - sock:send(table.concat(res, "\\r\\n")) - '; - } ---- request -GET /a ---- response_body -OK ---- no_error_log -[error] -[warn] - - diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/05-stream.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/05-stream.t deleted file mode 100644 index 67b1c269c..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/05-stream.t +++ /dev/null @@ -1,566 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4) - 1; - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Chunked streaming body reader returns the right content length. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - } - - local chunks = {} - repeat - local chunk = res.body_reader() - if chunk then - table.insert(chunks, chunk) - end - until not chunk - - local body = table.concat(chunks) - ngx.say(#body) - ngx.say(res.headers["Transfer-Encoding"]) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -32768 -chunked ---- no_error_log -[error] -[warn] - - -=== TEST 2: Non-Chunked streaming body reader returns the right content length. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - } - - local chunks = {} - repeat - local chunk = res.body_reader() - if chunk then - table.insert(chunks, chunk) - end - until not chunk - - local body = table.concat(chunks) - ngx.say(#body) - ngx.say(res.headers["Transfer-Encoding"]) - ngx.say(#chunks) - - httpc:close() - '; - } - location = /b { - chunked_transfer_encoding off; - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -32768 -nil -1 ---- no_error_log -[error] -[warn] - - -=== TEST 2b: Non-Chunked streaming body reader, buffer size becomes nil ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - } - - local chunks = {} - local buffer_size = 16384 - repeat - local chunk = res.body_reader(buffer_size) - if chunk then - table.insert(chunks, chunk) - end - - buffer_size = nil - until not chunk - - local body = table.concat(chunks) - ngx.say(res.headers["Transfer-Encoding"]) - - httpc:close() - '; - } - location = /b { - chunked_transfer_encoding off; - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -nil ---- error_log -Buffer size not specified, bailing - - -=== TEST 3: HTTP 1.0 body reader with no max size returns the right content length. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - version = 1.0, - } - - local chunks = {} - repeat - local chunk = res.body_reader() - if chunk then - table.insert(chunks, chunk) - end - until not chunk - - local body = table.concat(chunks) - ngx.say(#body) - ngx.say(res.headers["Transfer-Encoding"]) - ngx.say(#chunks) - - httpc:close() - '; - } - location = /b { - chunked_transfer_encoding off; - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -32768 -nil -1 ---- no_error_log -[error] -[warn] - - -=== TEST 4: HTTP 1.0 body reader with max chunk size returns the right content length. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - version = 1.0, - } - - local chunks = {} - local size = 8192 - repeat - local chunk = res.body_reader(size) - if chunk then - table.insert(chunks, chunk) - end - size = size + size - until not chunk - - local body = table.concat(chunks) - ngx.say(#body) - ngx.say(res.headers["Transfer-Encoding"]) - ngx.say(#chunks) - - httpc:close() - '; - } - location = /b { - chunked_transfer_encoding off; - content_by_lua ' - local len = 32769 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -32769 -nil -3 ---- no_error_log -[error] -[warn] - - -=== TEST 4b: HTTP 1.0 body reader with no content length, stream works as expected. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - version = 1.0, - } - - local chunks = {} - local size = 8192 - repeat - local chunk = res.body_reader(size) - if chunk then - table.insert(chunks, chunk) - end - size = size + size - until not chunk - - local body = table.concat(chunks) - ngx.say(#body) - ngx.say(#chunks) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.req.read_body() - local sock, err = ngx.req.socket(true) - if not sock then - ngx.say(err) - end - - local res = {} - table.insert(res, "HTTP/1.0 200 OK") - table.insert(res, "Date: " .. ngx.http_time(ngx.time())) - table.insert(res, "") - - local len = 32769 - local t = {} - for i=1,len do - t[i] = 0 - end - table.insert(res, table.concat(t)) - sock:send(table.concat(res, "\\r\\n")) - '; - } ---- request -GET /a ---- response_body -32769 -3 ---- no_error_log -[error] -[warn] - - -=== TEST 5: Chunked streaming body reader with max chunk size returns the right content length. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - } - - local chunks = {} - local size = 8192 - repeat - local chunk = res.body_reader(size) - if chunk then - table.insert(chunks, chunk) - end - size = size + size - until not chunk - - local body = table.concat(chunks) - ngx.say(#body) - ngx.say(res.headers["Transfer-Encoding"]) - ngx.say(#chunks) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - local len = 32768 - local t = {} - for i=1,len do - t[i] = 0 - end - ngx.print(table.concat(t)) - '; - } ---- request -GET /a ---- response_body -32768 -chunked -3 ---- no_error_log -[error] -[warn] - - -=== TEST 6: Request reader correctly reads body ---- http_config eval: $::HttpConfig ---- config - location = /a { - lua_need_request_body off; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local reader, err = httpc:get_client_body_reader(8192) - - repeat - local chunk, err = reader() - if chunk then - ngx.print(chunk) - end - until chunk == nil - - '; - } - ---- request -POST /a -foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz ---- response_body: foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz ---- no_error_log -[error] -[warn] - -=== TEST 7: Request reader correctly reads body in chunks ---- http_config eval: $::HttpConfig ---- config - location = /a { - lua_need_request_body off; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local reader, err = httpc:get_client_body_reader(64) - - local chunks = 0 - repeat - chunks = chunks +1 - local chunk, err = reader() - if chunk then - ngx.print(chunk) - end - until chunk == nil - ngx.say("\\n"..chunks) - '; - } - ---- request -POST /a -foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz ---- response_body -foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz -3 ---- no_error_log -[error] -[warn] - - -=== TEST 8: Request reader passes into client ---- http_config eval: $::HttpConfig ---- config - location = /a { - lua_need_request_body off; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local reader, err = httpc:get_client_body_reader(64) - - local res, err = httpc:request{ - method = POST, - path = "/b", - body = reader, - headers = ngx.req.get_headers(100, true), - } - - local body = res:read_body() - ngx.say(body) - httpc:close() - - '; - } - - location = /b { - content_by_lua ' - ngx.req.read_body() - local body, err = ngx.req.get_body_data() - ngx.print(body) - '; - } - ---- request -POST /a -foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz ---- response_body -foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz ---- no_error_log -[error] -[warn] - - -=== TEST 9: Body reader is a function returning nil when no body is present. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - method = "HEAD", - } - - repeat - local chunk = res.body_reader() - until not chunk - - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.exit(200) - '; - } ---- request -GET /a ---- no_error_log -[error] -[warn] - - -=== TEST 10: Issue a notice (but do not error) if trying to read the request body in a subrequest ---- http_config eval: $::HttpConfig ---- config - location = /a { - echo_location /b; - } - location = /b { - lua_need_request_body off; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local reader, err = httpc:get_client_body_reader(8192) - if not reader then - ngx.log(ngx.NOTICE, err) - return - end - - repeat - local chunk, err = reader() - if chunk then - ngx.print(chunk) - end - until chunk == nil - '; - } - ---- request -POST /a -foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz ---- response_body: ---- no_error_log -[error] -[warn] ---- error_log -attempt to read the request body in a subrequest diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/06-simpleinterface.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/06-simpleinterface.t deleted file mode 100644 index 6e1af37f9..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/06-simpleinterface.t +++ /dev/null @@ -1,145 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4) + 6; - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Simple URI interface ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - local res, err = httpc:request_uri("http://127.0.0.1:"..ngx.var.server_port.."/b?a=1&b=2") - - if not res then - ngx.log(ngx.ERR, err) - end - ngx.status = res.status - - ngx.header["X-Header-A"] = res.headers["X-Header-A"] - ngx.header["X-Header-B"] = res.headers["X-Header-B"] - - ngx.print(res.body) - '; - } - location = /b { - content_by_lua ' - for k,v in pairs(ngx.req.get_uri_args()) do - ngx.header["X-Header-" .. string.upper(k)] = v - end - ngx.say("OK") - '; - } ---- request -GET /a ---- response_headers -X-Header-A: 1 -X-Header-B: 2 ---- response_body -OK ---- no_error_log -[error] -[warn] - - -=== TEST 2: Simple URI interface HTTP 1.0 ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - local res, err = httpc:request_uri( - "http://127.0.0.1:"..ngx.var.server_port.."/b?a=1&b=2", { - } - ) - - ngx.status = res.status - - ngx.header["X-Header-A"] = res.headers["X-Header-A"] - ngx.header["X-Header-B"] = res.headers["X-Header-B"] - - ngx.print(res.body) - '; - } - location = /b { - content_by_lua ' - for k,v in pairs(ngx.req.get_uri_args()) do - ngx.header["X-Header-" .. 
string.upper(k)] = v - end - ngx.say("OK") - '; - } ---- request -GET /a ---- response_headers -X-Header-A: 1 -X-Header-B: 2 ---- response_body -OK ---- no_error_log -[error] -[warn] - - -=== TEST 3 Simple URI interface, params override ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - local res, err = httpc:request_uri( - "http://127.0.0.1:"..ngx.var.server_port.."/b?a=1&b=2", { - path = "/c", - query = { - a = 2, - b = 3, - }, - } - ) - - ngx.status = res.status - - ngx.header["X-Header-A"] = res.headers["X-Header-A"] - ngx.header["X-Header-B"] = res.headers["X-Header-B"] - - ngx.print(res.body) - '; - } - location = /c { - content_by_lua ' - for k,v in pairs(ngx.req.get_uri_args()) do - ngx.header["X-Header-" .. string.upper(k)] = v - end - ngx.say("OK") - '; - } ---- request -GET /a ---- response_headers -X-Header-A: 2 -X-Header-B: 3 ---- response_body -OK ---- no_error_log -[error] -[warn] diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/07-keepalive.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/07-keepalive.t deleted file mode 100644 index c8bda1407..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/07-keepalive.t +++ /dev/null @@ -1,240 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1 Simple interface, Connection: Keep-alive. Test the connection is reused. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - local res, err = httpc:request_uri( - "http://127.0.0.1:"..ngx.var.server_port.."/b", { - } - ) - - ngx.say(res.headers["Connection"]) - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - '; - } - location = /b { - content_by_lua ' - ngx.say("OK") - '; - } ---- request -GET /a ---- response_body -keep-alive -1 ---- no_error_log -[error] -[warn] - - -=== TEST 2 Simple interface, Connection: close, test we don't try to keepalive, but also that subsequent connections can keepalive. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - local res, err = httpc:request_uri( - "http://127.0.0.1:"..ngx.var.server_port.."/b", { - version = 1.0, - headers = { - ["Connection"] = "close", - }, - } - ) - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - - httpc:set_keepalive() - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - '; - } - location = /b { - content_by_lua ' - ngx.say("OK") - '; - } ---- request -GET /a ---- response_body -0 -1 ---- no_error_log -[error] -[warn] - - -=== TEST 3 Generic interface, Connection: Keep-alive. Test the connection is reused. 
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - local body = res:read_body() - - ngx.say(res.headers["Connection"]) - ngx.say(httpc:set_keepalive()) - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - '; - } - location = /b { - content_by_lua ' - ngx.say("OK") - '; - } ---- request -GET /a ---- response_body -keep-alive -1 -1 ---- no_error_log -[error] -[warn] - - -=== TEST 4 Generic interface, Connection: Close. Test we don't try to keepalive, but also that subsequent connections can keepalive. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - version = 1.0, - headers = { - ["Connection"] = "Close", - }, - path = "/b" - } - - local body = res:read_body() - - ngx.say(res.headers["Connection"]) - local r, e = httpc:set_keepalive() - ngx.say(r) - ngx.say(e) - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - - httpc:set_keepalive() - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - '; - } - location = /b { - content_by_lua ' - ngx.say("OK") - '; - } ---- request -GET /a ---- response_body -close -2 -connection must be closed -0 -1 ---- no_error_log -[error] -[warn] - - -=== TEST 5: Generic interface, HTTP 1.0, no connection header. Test we don't try to keepalive, but also that subsequent connections can keepalive. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", 12345) - - local res, err = httpc:request{ - version = 1.0, - path = "/b" - } - - local body = res:read_body() - ngx.print(body) - - ngx.say(res.headers["Connection"]) - - local r, e = httpc:set_keepalive() - ngx.say(r) - ngx.say(e) - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - - httpc:set_keepalive() - - httpc:connect("127.0.0.1", ngx.var.server_port) - ngx.say(httpc:get_reused_times()) - '; - } - location = /b { - content_by_lua ' - ngx.say("OK") - '; - } ---- request -GET /a ---- tcp_listen: 12345 ---- tcp_reply -HTTP/1.0 200 OK -Date: Fri, 08 Aug 2016 08:12:31 GMT -Server: OpenResty - -OK ---- response_body -OK -nil -2 -connection must be closed -0 -1 ---- no_error_log -[error] -[warn] diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/08-pipeline.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/08-pipeline.t deleted file mode 100644 index 1b24020a1..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/08-pipeline.t +++ /dev/null @@ -1,143 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1 Test that pipelined requests can be read correctly.
---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local responses = httpc:request_pipeline{ - { - path = "/b", - }, - { - path = "/c", - }, - { - path = "/d", - } - } - - for i,r in ipairs(responses) do - if r.status then - ngx.say(r.status) - ngx.say(r.headers["X-Res"]) - ngx.say(r:read_body()) - end - end - '; - } - location = /b { - content_by_lua ' - ngx.status = 200 - ngx.header["X-Res"] = "B" - ngx.print("B") - '; - } - location = /c { - content_by_lua ' - ngx.status = 404 - ngx.header["X-Res"] = "C" - ngx.print("C") - '; - } - location = /d { - content_by_lua ' - ngx.status = 200 - ngx.header["X-Res"] = "D" - ngx.print("D") - '; - } ---- request -GET /a ---- response_body -200 -B -B -404 -C -C -200 -D -D ---- no_error_log -[error] -[warn] - - -=== TEST 2: Test we can handle timeouts on reading the pipelined requests. ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - httpc:set_timeout(1) - - local responses = httpc:request_pipeline{ - { - path = "/b", - }, - { - path = "/c", - }, - } - - for i,r in ipairs(responses) do - if r.status then - ngx.say(r.status) - ngx.say(r.headers["X-Res"]) - ngx.say(r:read_body()) - end - end - '; - } - location = /b { - content_by_lua ' - ngx.status = 200 - ngx.header["X-Res"] = "B" - ngx.print("B") - '; - } - location = /c { - content_by_lua ' - ngx.status = 404 - ngx.header["X-Res"] = "C" - ngx.sleep(1) - ngx.print("C") - '; - } ---- request -GET /a ---- response_body -200 -B -B ---- no_error_log -[warn] ---- error_log eval -[qr/timeout/] diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/09-ssl.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/09-ssl.t deleted file mode 100644 index bc46514b4..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/09-ssl.t +++ /dev/null @@ -1,59 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: parse_uri returns port 443 for https URIs ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - local parsed = httpc:parse_uri("https://www.google.com/foobar") - ngx.say(parsed[3]) - '; - } ---- request -GET /a ---- response_body -443 ---- no_error_log -[error] -[warn] - -=== TEST 2: parse_uri returns port 80 for http URIs ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - local parsed = httpc:parse_uri("http://www.google.com/foobar") - ngx.say(parsed[3]) - '; - } ---- request -GET /a ---- response_body -80 ---- no_error_log -[error] -[warn] diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/10-clientbodyreader.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/10-clientbodyreader.t deleted file mode 100644 index e67c82305..000000000 --- 
a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/10-clientbodyreader.t +++ /dev/null @@ -1,57 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Issue a notice (but do not error) if trying to read the request body in a subrequest ---- http_config eval: $::HttpConfig ---- config - location = /a { - echo_location /b; - } - location = /b { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/c", - headers = { - ["Content-Type"] = "application/x-www-form-urlencoded", - } - } - if not res then - ngx.say(err) - end - ngx.print(res:read_body()) - httpc:close() - '; - } - location /c { - echo "OK"; - } ---- request -GET /a ---- response_body -OK ---- no_error_log -[error] -[warn] - diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/11-proxy.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/11-proxy.t deleted file mode 100644 index d42a06c89..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/11-proxy.t +++ /dev/null @@ -1,152 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 5); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Proxy GET request and response ---- http_config eval: $::HttpConfig ---- config - location = /a_prx { - rewrite ^(.*)_prx$ $1 break; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - httpc:proxy_response(httpc:proxy_request()) - httpc:set_keepalive() - '; - } - location = /a { - content_by_lua ' - ngx.status = 200 - ngx.header["X-Test"] = "foo" - ngx.say("OK") - '; - } ---- request -GET /a_prx ---- response_body -OK ---- response_headers -X-Test: foo ---- error_code: 200 ---- no_error_log -[error] -[warn] - - -=== TEST 2: Proxy POST request and response ---- http_config eval: $::HttpConfig ---- config - location = /a_prx { - rewrite ^(.*)_prx$ $1 break; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - httpc:proxy_response(httpc:proxy_request()) - httpc:set_keepalive() - '; - } - location = /a { - lua_need_request_body on; - content_by_lua ' - ngx.status = 404 - ngx.header["X-Test"] = "foo" - local args, err = ngx.req.get_post_args() - ngx.say(args["foo"]) - ngx.say(args["hello"]) - '; - } ---- request -POST /a_prx -foo=bar&hello=world ---- response_body -bar -world ---- response_headers -X-Test: foo ---- error_code: 404 ---- no_error_log -[error] -[warn] - - -=== TEST 3: Proxy multiple headers ---- http_config eval: $::HttpConfig ---- config - location = /a_prx { - rewrite ^(.*)_prx$ $1 break; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - httpc:proxy_response(httpc:proxy_request()) - httpc:set_keepalive() - '; - } - location = /a { - content_by_lua ' - ngx.status = 200 - 
ngx.header["Set-Cookie"] = { "cookie1", "cookie2" } - ngx.say("OK") - '; - } ---- request -GET /a_prx ---- response_body -OK ---- raw_response_headers_like: .*Set-Cookie: cookie1\r\nSet-Cookie: cookie2\r\n ---- error_code: 200 ---- no_error_log -[error] -[warn] - - -=== TEST 4: Proxy still works with spaces in URI ---- http_config eval: $::HttpConfig ---- config - location = "/a_ b_prx" { - rewrite ^(.*)_prx$ $1 break; - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - httpc:proxy_response(httpc:proxy_request()) - httpc:set_keepalive() - '; - } - location = "/a_ b" { - content_by_lua ' - ngx.status = 200 - ngx.header["X-Test"] = "foo" - ngx.say("OK") - '; - } ---- request -GET /a_%20b_prx ---- response_body -OK ---- response_headers -X-Test: foo ---- error_code: 200 ---- no_error_log -[error] -[warn] diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/12-case_insensitive_headers.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/12-case_insensitive_headers.t deleted file mode 100644 index 43bcbc151..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/12-case_insensitive_headers.t +++ /dev/null @@ -1,160 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 4); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Test header normalisation ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http_headers = require "resty.http_headers" - - local headers = http_headers.new() - - headers.x_a_header = "a" - headers["x-b-header"] = "b" - headers["X-C-Header"] = "c" - headers["X_d-HEAder"] = "d" - - ngx.say(headers["X-A-Header"]) - ngx.say(headers.x_b_header) - - for k,v in pairs(headers) do - ngx.say(k, ": ", v) - end - '; - } ---- request -GET /a ---- response_body -a -b -x-b-header: b -x-a-header: a -X-d-HEAder: d -X-C-Header: c ---- no_error_log -[error] -[warn] - - -=== TEST 2: Test headers can be accessed in all cases ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b" - } - - ngx.status = res.status - ngx.say(res.headers["X-Foo-Header"]) - ngx.say(res.headers["x-fOo-heaDeR"]) - ngx.say(res.headers.x_foo_header) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.header["X-Foo-Header"] = "bar" - ngx.say("OK") - '; - } ---- request -GET /a ---- response_body -bar -bar -bar ---- no_error_log -[error] -[warn] - - -=== TEST 3: Test request headers are normalised ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - httpc:connect("127.0.0.1", ngx.var.server_port) - - local res, err = httpc:request{ - path = "/b", - headers = { - ["uSeR-AgENT"] = "test_user_agent", - x_foo = "bar", - }, - } - - ngx.status = res.status - ngx.print(res:read_body()) - - httpc:close() - '; - } - location = /b { - content_by_lua ' - ngx.say(ngx.req.get_headers()["User-Agent"]) - ngx.say(ngx.req.get_headers()["X-Foo"]) - '; - } ---- request -GET /a ---- response_body 
-test_user_agent -bar ---- no_error_log -[error] - - -=== TEST 4: Test that headers remain unique ---- http_config eval: $::HttpConfig ---- config - location = /a { - content_by_lua ' - local http_headers = require "resty.http_headers" - - local headers = http_headers.new() - - headers["x-a-header"] = "a" - headers["X-A-HEAder"] = "b" - - for k,v in pairs(headers) do - ngx.log(ngx.DEBUG, k, ": ", v) - ngx.header[k] = v - end - '; - } ---- request -GET /a ---- response_headers -x-a-header: b ---- no_error_log -[error] -[warn] -[warn] diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/13-default-path.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/13-default-path.t deleted file mode 100644 index 94f45250b..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/13-default-path.t +++ /dev/null @@ -1,52 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 3); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: request_uri (check the default path) ---- http_config eval: $::HttpConfig ---- config - location /lua { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local res, err = httpc:request_uri("http://127.0.0.1:"..ngx.var.server_port) - - if res and 200 == res.status then - ngx.say("OK") - else - ngx.say("FAIL") - end - '; - } - - location =/ { - content_by_lua ' - ngx.print("OK") - '; - } ---- request -GET /lua ---- response_body -OK ---- no_error_log -[error] - diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/14-host-header.t b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/14-host-header.t deleted file mode 100644 index e3411ffe4..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/14-host-header.t +++ /dev/null @@ -1,161 +0,0 @@ -# vim:set ft= ts=4 sw=4 et: - -use Test::Nginx::Socket; -use Cwd qw(cwd); - -plan tests => repeat_each() * (blocks() * 3); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - error_log logs/error.log debug; - resolver 8.8.8.8; -}; - -$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; -$ENV{TEST_NGINX_PWD} ||= $pwd; - -sub read_file { - my $infile = shift; - open my $in, $infile - or die "cannot open $infile for reading: $!"; - my $cert = do { local $/; <$in> }; - close $in; - $cert; -} - -our $TestCertificate = read_file("t/cert/test.crt"); -our $TestCertificateKey = read_file("t/cert/test.key"); - -no_long_string(); -#no_diff(); - -run_tests(); - -__DATA__ -=== TEST 1: Default HTTP port is not added to Host header ---- http_config eval: $::HttpConfig ---- config - location /lua { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local res, err = httpc:request_uri("http://www.google.com") - '; - } ---- request -GET /lua ---- no_error_log -[error] ---- error_log -Host: www.google.com - - -=== TEST 2: Default HTTPS port is not added to Host header ---- http_config eval: $::HttpConfig ---- config - location /lua { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local res, err = httpc:request_uri("https://www.google.com:443", { ssl_verify = false }) - '; - } ---- request -GET /lua ---- no_error_log -[error] ---- error_log -Host: www.google.com - 
- -=== TEST 3: Non-default HTTP port is added to Host header ---- http_config - lua_package_path "$TEST_NGINX_PWD/lib/?.lua;;"; - error_log logs/error.log debug; - resolver 8.8.8.8; - server { - listen *:8080; - } ---- config - location /lua { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local res, err = httpc:request_uri("http://127.0.0.1:8080") - '; - } ---- request -GET /lua ---- no_error_log -[error] ---- error_log -Host: 127.0.0.1:8080 - - -=== TEST 4: Non-default HTTPS port is added to Host header ---- http_config - lua_package_path "$TEST_NGINX_PWD/lib/?.lua;;"; - error_log logs/error.log debug; - resolver 8.8.8.8; - server { - listen *:8080; - listen *:8081 ssl; - ssl_certificate ../html/test.crt; - ssl_certificate_key ../html/test.key; - } ---- config - location /lua { - content_by_lua ' - local http = require "resty.http" - local httpc = http.new() - - local res, err = httpc:request_uri("https://127.0.0.1:8081", { ssl_verify = false }) - '; - } ---- user_files eval -">>> test.key -$::TestCertificateKey ->>> test.crt -$::TestCertificate" ---- request -GET /lua ---- no_error_log -[error] ---- error_log -Host: 127.0.0.1:8081 - - -=== TEST 5: No host header on a unix domain socket returns a useful error. ---- http_config eval: $::HttpConfig ---- config - location /a { - content_by_lua_block { - local http = require "resty.http" - local httpc = http.new() - - local res, err = httpc:connect("unix:test.sock") - if not res then - ngx.log(ngx.ERR, err) - end - - local res, err = httpc:request({ path = "/" }) - if not res then - ngx.say(err) - else - ngx.say(res:read_body()) - end - } - } ---- tcp_listen: test.sock ---- tcp_reply: OK ---- request -GET /a ---- no_error_log -[error] ---- response_body -Unable to generate a useful Host header for a unix domain socket. Please provide one. 
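-- A minimal sketch of the two client flows that the vendored t/ suite deleted
-- above exercised, kept here for reviewers losing those tests as reference.
-- It assumes an OpenResty content_by_lua context with resty.http still
-- reachable on the lua_package_path; the port 8080 and path /b are
-- illustrative placeholders, not values taken from the tests' fixtures.
local http = require "resty.http"
local httpc = http.new()

-- Simple interface: request_uri performs connect, request, body read and
-- keep-alive in a single call and returns the full body in res.body.
local res, err = httpc:request_uri("http://127.0.0.1:8080/b?a=1", {})
if not res then
    ngx.log(ngx.ERR, "request_uri failed: ", err)
    return
end
ngx.say(res.status, " ", res.body)

-- Generic interface: explicit connect, streamed body, explicit keep-alive.
local ok, cerr = httpc:connect("127.0.0.1", 8080)
if not ok then
    ngx.log(ngx.ERR, "connect failed: ", cerr)
    return
end

local res2, rerr = httpc:request{ method = "GET", path = "/b" }
if not res2 then
    ngx.log(ngx.ERR, "request failed: ", rerr)
    return
end

repeat
    -- body_reader streams the body in chunks of up to the given size and
    -- returns nil once the body is exhausted.
    local chunk = res2.body_reader(8192)
    if chunk then ngx.print(chunk) end
until not chunk

-- As the deleted 07-keepalive.t showed, set_keepalive succeeds here but
-- returns nil plus "connection must be closed" after a Connection: close
-- or HTTP/1.0 response without a keep-alive header.
httpc:set_keepalive()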
diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/cert/test.crt b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/cert/test.crt deleted file mode 100644 index ae1134461..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/cert/test.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8DCCAtigAwIBAgIJALL9eJPZ6neGMA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV -BAYTAkdCMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ0wCwYDVQQKEwRU -ZXN0MQ0wCwYDVQQLEwRUZXN0MQ0wCwYDVQQDEwR0ZXN0MB4XDTE1MTAyMTE2MjQ1 -NloXDTE1MTEyMDE2MjQ1NlowWDELMAkGA1UEBhMCR0IxDTALBgNVBAgTBFRlc3Qx -DTALBgNVBAcTBFRlc3QxDTALBgNVBAoTBFRlc3QxDTALBgNVBAsTBFRlc3QxDTAL -BgNVBAMTBHRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDz/AoE -c+TPdm+Aqcchq8fLNWksFQZqbsCBGnq8rUG1b6MsVlAOkDUQGRlNPs9v0/+pzgX7 -IYXPCFcV7YONNsTUfvBYTq43mfOycmAdb3SX6kBygxdhYsDRZR+vCAIkjoRmRB20 -meh1motqM58spq3IcT8VADTRJl1OI48VTnxmXdCtmkOymU948DcauMoxm03eL/hU -6eniNEujbnbB305noNG0W5c3h6iz9CvqUAD1kwyjick+f1atB2YYn1bymA+db6YN -3iTo0v2raWmIc7D+qqpkNaCRxgMb2HN6X3/SfkijtNJidjqHMbs2ftlKJ5/lODPZ -rCPQOcYK6TT8MIZ1AgMBAAGjgbwwgbkwHQYDVR0OBBYEFFUC1GrAhUp7IvJH5iyf -+fJQliEIMIGJBgNVHSMEgYEwf4AUVQLUasCFSnsi8kfmLJ/58lCWIQihXKRaMFgx -CzAJBgNVBAYTAkdCMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ0wCwYD -VQQKEwRUZXN0MQ0wCwYDVQQLEwRUZXN0MQ0wCwYDVQQDEwR0ZXN0ggkAsv14k9nq -d4YwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAtaUQOr3Qn87KXmmP -GbSvCLSl+bScE09VYZsYaB6iq0pGN9y+Vh4/HjBUUsFexopw1dY25MEEJXEVi1xV -2krLYAsfKCM6c1QBVmdqfVuxUvxpXwr+CNRNAlzz6PhjkeY/Ds/j4sg7EqN8hMmT -gu8GuogX7+ZCgrzRSMMclWej+W8D1xSIuCC+rqv4w9SZdtVb3XGpCyizpTNsQAuV -ACXvq9KXkEEj+XNvKrNdWd4zG715RdMnVm+WM53d9PLp63P+4/kwhwHULYhXygQ3 -DzzVPaojBBdw3VaHbbPHnv73FtAzOb7ky6zJ01DlmEPxEahCFpklMkY9T2uCdpj9 -oOzaNA== ------END CERTIFICATE----- diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/cert/test.key b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/cert/test.key deleted file mode 100644 index 4fcd0dae8..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/t/cert/test.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA8/wKBHPkz3ZvgKnHIavHyzVpLBUGam7AgRp6vK1BtW+jLFZQ -DpA1EBkZTT7Pb9P/qc4F+yGFzwhXFe2DjTbE1H7wWE6uN5nzsnJgHW90l+pAcoMX -YWLA0WUfrwgCJI6EZkQdtJnodZqLajOfLKatyHE/FQA00SZdTiOPFU58Zl3QrZpD -splPePA3GrjKMZtN3i/4VOnp4jRLo252wd9OZ6DRtFuXN4eos/Qr6lAA9ZMMo4nJ -Pn9WrQdmGJ9W8pgPnW+mDd4k6NL9q2lpiHOw/qqqZDWgkcYDG9hzel9/0n5Io7TS -YnY6hzG7Nn7ZSief5Tgz2awj0DnGCuk0/DCGdQIDAQABAoIBAGjKc7L94+SHRdTJ -FtILacCJrCZW0W6dKulIajbnYzV+QWMlnzTiEyha31ciBw5My54u8rqt5z7Ioj60 -yK+6OkfaTXhgMsuGv/iAz29VE4q7/fow+7XEKHTHLhiLJAB3hb42u1t6TzFTs1Vl -3pPa8wEIQsPOVuENzT1mYGoST7PW+LBIMr9ScMnRHfC0MNdV/ntQiXideOAd5PkA -4O7fNgYZ8CTAZ8rOLYTMFF76/c/jLiqfeghqbIhqMykk36kd7Lud//FRykVsn1aJ -REUva/SjVEth5kITot1hpMC4SIElWpha2YxiiZFoSXSaUbtHpymiUGV01cYtMWk0 -MZ5HN3ECgYEA/74U8DpwPxd4up9syKyNqOqrCrYnhEEC/tdU/W5wECi4y5kppjdd -88lZzICVPzk2fezYXlCO9HiSHU1UfcEsY3u16qNCvylK7Qz1OqXV/Ncj59891Q5Z -K0UBcbnrv+YD6muZuhlHEbyDPqYO091G9Gf/BbL5JIBDzg1qFO9Dh9cCgYEA9Drt -O9PJ5Sjz3mXQVtVHpwyhOVnd7CUv8a1zkUQCK5uQeaiF5kal1FIo7pLOr3KAvG0C -pXbm/TobwlfAfcERQN88aPN8Z/l1CB0oKV6ipBMD2/XLzDRtx8lpTeh/BB8jIhrz -+FDJY54HCzLfW0P5kT+Cyw51ofjziPnFdO/Z6pMCgYEAon17gEchGnUnWCwDSl2Y -hELV+jBSW02TQag/b+bDfQDiqTnfpKR5JXRBghYQveL0JH5f200EB4C0FboUfPJH -6c2ogDTLK/poiMU66tCDbeqj/adx+fTr4votOL0QdRUIV+GWAxAcf8BvA1cvBJ4L -fy60ckKM2gxFCJ6tUC/VkHECgYBoMDNAUItSnXPbrmeAg5/7naGxy6qmsP6RBUPF -9tNOMyEhJUlqAT2BJEOd8zcFFb3hpEd6uwyzfnSVJcZSX2iy2gj1ZNnvqTXJ7lZR 
-v7N2dz4wOd1lEgC7OCsaN1LoOThNtl3Z0uz2+FVc66jpUEhJNGThpxt7q66JArS/ -vAqkzQKBgFkzqA6QpnH5KhOCoZcuLQ4MtvnNHOx1xSm2B0gKDVJzGkHexTmOJvwM -ZhHXRl9txS4icejS+AGUXNBzCWEusfhDaZpZqS6zt6UxEjMsLj/Te7z++2KQn4t/ -aI77jClydW1pJvICtqm5v+sukVZvQTTJza9ujta6fj7u2s671np9 ------END RSA PRIVATE KEY----- diff --git a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/util/lua-releng b/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/util/lua-releng deleted file mode 100755 index 453443bc7..000000000 --- a/controllers/nginx/rootfs/etc/nginx/lua/vendor/lua-resty-http/util/lua-releng +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; - -sub file_contains ($$); - -my $version; -for my $file (map glob, qw{ *.lua lib/*.lua lib/*/*.lua lib/*/*/*.lua lib/*/*/*/*.lua lib/*/*/*/*/*.lua }) { - # Check the sanity of each .lua file - open my $in, $file or - die "ERROR: Can't open $file for reading: $!\n"; - my $found_ver; - while (<$in>) { - my ($ver, $skipping); - if (/(?x) (?:_VERSION) \s* = .*? ([\d\.]*\d+) (.*? SKIP)?/) { - my $orig_ver = $ver = $1; - $found_ver = 1; - # $skipping = $2; - $ver =~ s{^(\d+)\.(\d{3})(\d{3})$}{join '.', int($1), int($2), int($3)}e; - warn "$file: $orig_ver ($ver)\n"; - - } elsif (/(?x) (?:_VERSION) \s* = \s* ([a-zA-Z_]\S*)/) { - warn "$file: $1\n"; - $found_ver = 1; - last; - } - - if ($ver and $version and !$skipping) { - if ($version ne $ver) { - # die "$file: $ver != $version\n"; - } - } elsif ($ver and !$version) { - $version = $ver; - } - } - if (!$found_ver) { - warn "WARNING: No \"_VERSION\" or \"version\" field found in `$file`.\n"; - } - close $in; - - print "Checking use of Lua global variables in file $file ...\n"; - system("luac -p -l $file | grep ETGLOBAL | grep -vE 'require|type|tostring|error|ngx|ndk|jit|setmetatable|getmetatable|string|table|io|os|print|tonumber|math|pcall|xpcall|unpack|pairs|ipairs|assert|module|package|coroutine|[gs]etfenv|next|select|rawset|rawget|debug'"); - #file_contains($file, "attempt to write to undeclared variable"); - system("grep -H -n -E --color '.{120}' $file"); -} - -sub file_contains ($$) { - my ($file, $regex) = @_; - open my $in, $file - or die "Cannot open $file for reading: $!\n"; - my $content = do { local $/; <$in> }; - close $in; - #print "$content"; - return scalar ($content =~ /$regex/); -} - -if (-d 't') { - for my $file (map glob, qw{ t/*.t t/*/*.t t/*/*/*.t }) { - system(qq{grep -H -n --color -E '\\--- ?(ONLY|LAST)' $file}); - } -} - diff --git a/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl b/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl index c971fc0bf..9e0c18965 100644 --- a/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl +++ b/controllers/nginx/rootfs/etc/nginx/template/nginx.tmpl @@ -1,8 +1,11 @@ +{{ $all := . 
}} +{{ $servers := .Servers }} {{ $cfg := .Cfg }} {{ $IsIPV6Enabled := .IsIPV6Enabled }} {{ $healthzURI := .HealthzURI }} {{ $backends := .Backends }} {{ $proxyHeaders := .ProxySetHeaders }} +{{ $addHeaders := .AddHeaders }} daemon off; worker_processes {{ $cfg.WorkerProcesses }}; @@ -11,6 +14,10 @@ pid /run/nginx.pid; worker_rlimit_nofile {{ .MaxOpenFiles }}; {{ end}} +{{/* http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout */}} +{{/* avoid waiting too long during a reload */}} +worker_shutdown_timeout {{ $cfg.WorkerShutdownTimeout }} ; + events { multi_accept on; worker_connections {{ $cfg.MaxWorkerConnections }}; @@ -20,14 +27,15 @@ events { http { {{/* we use the value of the header X-Forwarded-For to be able to use the geo_ip module */}} {{ if $cfg.UseProxyProtocol }} - set_real_ip_from {{ $cfg.ProxyRealIPCIDR }}; real_ip_header proxy_protocol; {{ else }} - set_real_ip_from {{ $cfg.ProxyRealIPCIDR }}; - real_ip_header X-Forwarded-For; + real_ip_header {{ $cfg.ForwardedForHeader }}; {{ end }} real_ip_recursive on; + {{ range $trusted_ip := $cfg.ProxyRealIPCIDR }} + set_real_ip_from {{ $trusted_ip }}; + {{ end }} {{/* databases used to determine the country depending on the client IP address */}} {{/* http://nginx.org/en/docs/http/ngx_http_geoip_module.html */}} @@ -41,14 +49,11 @@ http { vhost_traffic_status_filter_by_set_key $geoip_country_code country::*; {{ end }} - # lua section to return proper error codes when custom pages are used - lua_package_path '.?.lua;/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/lua-resty-http/lib/?.lua;'; - init_by_lua_block { - require("error_page") - } - sendfile on; + aio threads; + aio_write on; + tcp_nopush on; tcp_nodelay on; @@ -60,8 +65,10 @@ http { keepalive_requests {{ $cfg.KeepAliveRequests }}; client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }}; + client_header_timeout {{ $cfg.ClientHeaderTimeout }}s; large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }}; client_body_buffer_size {{ $cfg.ClientBodyBufferSize }}; + client_body_timeout {{ $cfg.ClientBodyTimeout }}s; http2_max_field_size {{ $cfg.HTTP2MaxFieldSize }}; http2_max_header_size {{ $cfg.HTTP2MaxHeaderSize }}; @@ -71,6 +78,9 @@ http { server_names_hash_bucket_size {{ $cfg.ServerNameHashBucketSize }}; map_hash_bucket_size {{ $cfg.MapHashBucketSize }}; + proxy_headers_hash_max_size {{ $cfg.ProxyHeadersHashMaxSize }}; + proxy_headers_hash_bucket_size {{ $cfg.ProxyHeadersHashBucketSize }}; + variables_hash_bucket_size {{ $cfg.VariablesHashBucketSize }}; variables_hash_max_size {{ $cfg.VariablesHashMaxSize }}; @@ -88,12 +98,21 @@ http { gzip_proxied any; {{ end }} + # Custom headers for response + {{ range $k, $v := $addHeaders }} + add_header {{ $k }} "{{ $v }}"; + {{ end }} + server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }}; # disable warnings uninitialized_variable_warn off; - log_format upstreaminfo {{ if $cfg.LogFormatEscapeJson }}escape=json {{ end }}'{{ buildLogFormatUpstream $cfg }}'; + # Additional available variables: + # $namespace + # $ingress_name + # $service_name + log_format upstreaminfo {{ if $cfg.LogFormatEscapeJSON }}escape=json {{ end }}'{{ buildLogFormatUpstream $cfg }}'; {{/* map urls that should not appear in access.log */}} {{/* http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log */}} @@ -106,9 +125,9 @@ http { {{ if $cfg.DisableAccessLog }} access_log off; {{ else }} - access_log /var/log/nginx/access.log upstreaminfo if=$loggable; + access_log {{ $cfg.AccessLogPath }} upstreaminfo if=$loggable; {{ 
end }} - error_log /var/log/nginx/error.log {{ $cfg.ErrorLogLevel }}; + error_log {{ $cfg.ErrorLogPath }} {{ $cfg.ErrorLogLevel }}; {{ buildResolvers $cfg.Resolver }} @@ -135,21 +154,28 @@ http { '' $server_port; } - map $pass_access_scheme $the_x_forwarded_for { - default $remote_addr; - https $proxy_protocol_addr; + map {{ buildForwardedFor $cfg.ForwardedForHeader }} $the_real_ip { + default {{ buildForwardedFor $cfg.ForwardedForHeader }}; + "~*(?<ip>[0-9\.]+).*" $ip; + {{ if $cfg.UseProxyProtocol }} + '' $proxy_protocol_addr; + {{ else }} + '' $realip_remote_addr; + {{ end }} } - map $pass_access_scheme $the_real_ip { - default $remote_addr; - https $proxy_protocol_addr; - } - - # map port 442 to 443 for header X-Forwarded-Port + {{ if $all.IsSSLPassthroughEnabled }} + # map port {{ $all.ListenPorts.SSLProxy }} to 443 for header X-Forwarded-Port map $pass_server_port $pass_port { - 442 443; + {{ $all.ListenPorts.SSLProxy }} 443; default $pass_server_port; } + {{ else }} + map $pass_server_port $pass_port { + 443 443; + default $pass_server_port; + } + {{ end }} # Map a response error watching the header Content-Type map $http_accept $httpAccept { @@ -167,11 +193,16 @@ http { } # Obtain best http host - map $http_host $best_http_host { + map $http_host $this_host { default $http_host; '' $host; } + map $http_x_forwarded_host $best_http_host { + default $http_x_forwarded_host; + '' $this_host; + } + server_name_in_redirect off; port_in_redirect off; @@ -214,37 +245,52 @@ http { {{ range $errCode := $cfg.CustomHTTPErrors }} error_page {{ $errCode }} = @custom_{{ $errCode }};{{ end }} - # In case of errors try the next upstream server before returning an error - proxy_next_upstream error timeout invalid_header http_502 http_503 http_504{{ if $cfg.RetryNonIdempotent }} non_idempotent{{ end }}; - proxy_ssl_session_reuse on; {{ if $cfg.AllowBackendServerHeader }} proxy_pass_header Server; {{ end }} - {{range $name, $upstream := $backends}} - upstream {{$upstream.Name}} { - {{ if eq $upstream.SessionAffinity.AffinityType "cookie" }} - sticky hash={{$upstream.SessionAffinity.CookieSessionAffinity.Hash}} name={{$upstream.SessionAffinity.CookieSessionAffinity.Name}} httponly; - {{ else }} + {{ range $name, $upstream := $backends }} + {{ if eq $upstream.SessionAffinity.AffinityType "cookie" }} + upstream sticky-{{ $upstream.Name }} { + sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }} httponly; + + {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} + keepalive {{ $cfg.UpstreamKeepaliveConnections }}; + {{ end }} + + {{ range $server := $upstream.Endpoints }}server {{ $server.Address | formatIP }}:{{ $server.Port }} max_fails={{ $server.MaxFails }} fail_timeout={{ $server.FailTimeout }}; + {{ end }} + } + + {{ end }} + + upstream {{ $upstream.Name }} { # Load balance algorithm; empty for round robin, which is the default {{ if ne $cfg.LoadBalanceAlgorithm "round_robin" }} {{ $cfg.LoadBalanceAlgorithm }}; {{ end }} + + {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} + keepalive {{ $cfg.UpstreamKeepaliveConnections }}; {{ end }} + {{ range $server := $upstream.Endpoints }}server {{ $server.Address | formatIP }}:{{ $server.Port }} max_fails={{ $server.MaxFails }} fail_timeout={{ $server.FailTimeout }}; {{ end }} } + {{ end }} {{/* build the maps that will be used to validate the Whitelist */}} - {{ range $index, $server := .Servers }} + {{ range $index, $server := $servers }} {{ range $location := $server.Locations }} {{ 
$path := buildLocation $location }} {{ if isLocationAllowed $location }} {{ if gt (len $location.Whitelist.CIDR) 0 }} + + # Deny for {{ print $server.Hostname $path }} geo $the_real_ip {{ buildDenyVariable (print $server.Hostname "_" $path) }} { default 1; @@ -256,225 +302,77 @@ http { {{ end }} {{ end }} + {{ range $rl := (filterRateLimits $servers ) }} + # Ratelimit {{ $rl.Name }} + geo $the_real_ip $whitelist_{{ $rl.ID }} { + default 0; + {{ range $ip := $rl.Whitelist }} + {{ $ip }} 1;{{ end }} + } + + # Ratelimit {{ $rl.Name }} + map $whitelist_{{ $rl.ID }} $limit_{{ $rl.ID }} { + 0 {{ $cfg.LimitConnZoneVariable }}; + 1 ""; + } + {{ end }} + {{/* build all the required rate limit zones. Each annotation requires a dedicated zone */}} {{/* 1MB -> 16 thousand 64-byte states or about 8 thousand 128-byte states */}} - {{ range $zone := (buildRateLimitZones .Servers) }} + {{ range $zone := (buildRateLimitZones $servers) }} {{ $zone }} {{ end }} - {{ $backlogSize := .BacklogSize }} - {{ range $index, $server := .Servers }} + {{/* Build server redirects (from/to www) */}} + {{ range $hostname, $to := .RedirectServers }} + server { + {{ range $address := $all.Cfg.BindAddressIpv4 }} + listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}; + listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} ssl; + {{ else }} + listen {{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}; + listen {{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} ssl; + {{ end }} + {{ if $IsIPV6Enabled }} + {{ range $address := $all.Cfg.BindAddressIpv6 }} + listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}; + listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}; + {{ else }} + listen [::]:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}; + listen [::]:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}; + {{ end }} + {{ end }} + server_name {{ $hostname }}; + return 301 $scheme://{{ $to }}$request_uri; + } + {{ end }} + + {{ range $index, $server := $servers }} server { server_name {{ $server.Hostname }}; - listen 80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}}; - {{ if $IsIPV6Enabled }}listen [::]:80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{ end }};{{ end }} - set $proxy_upstream_name "-"; - - {{/* Listen on 442 because port 443 is used in the TLS sni server */}} - {{/* This listener must always have proxy_protocol enabled, because the SNI listener forwards on source IP info in it. 
*/}} - {{ if not (empty $server.SSLCertificate) }}listen 442 proxy_protocol{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }}; - {{ if $IsIPV6Enabled }}{{ if not (empty $server.SSLCertificate) }}listen [::]:442 proxy_protocol{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};{{ end }} - {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}} - # PEM sha: {{ $server.SSLPemChecksum }} - ssl_certificate {{ $server.SSLCertificate }}; - ssl_certificate_key {{ $server.SSLCertificate }}; - {{ end }} - - {{ if (and (not (empty $server.SSLCertificate)) $cfg.HSTS) }} - more_set_headers "Strict-Transport-Security: max-age={{ $cfg.HSTSMaxAge }}{{ if $cfg.HSTSIncludeSubdomains }}; includeSubDomains{{ end }};{{ if $cfg.HSTSPreload }} preload{{ end }}"; - {{ end }} - - {{ if $cfg.EnableVtsStatus }}vhost_traffic_status_filter_by_set_key $geoip_country_code country::$server_name;{{ end }} - - {{ if not (empty $server.ServerSnippet) }} - {{ $server.ServerSnippet }} - {{ end }} - - {{ range $location := $server.Locations }} - {{ $path := buildLocation $location }} - {{ $authPath := buildAuthLocation $location }} - - {{ if not (empty $location.CertificateAuth.AuthSSLCert.CAFileName) }} - # PEM sha: {{ $location.CertificateAuth.AuthSSLCert.PemSHA }} - ssl_client_certificate {{ $location.CertificateAuth.AuthSSLCert.CAFileName }}; - ssl_verify_client on; - ssl_verify_depth {{ $location.CertificateAuth.ValidationDepth }}; - {{ end }} - - {{ if (or $location.Redirect.ForceSSLRedirect (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect)) }} - # enforce ssl on server side - if ($pass_access_scheme = http) { - return 301 https://$best_http_host$request_uri; - } - {{ end }} - - {{ if not (empty $location.Redirect.AppRoot)}} - if ($uri = /) { - return 302 {{ $location.Redirect.AppRoot }}; - } - {{ end }} - {{ if not (empty $authPath) }} - location = {{ $authPath }} { - internal; - set $proxy_upstream_name "internal"; - - {{ if not $location.ExternalAuth.SendBody }} - proxy_pass_request_body off; - proxy_set_header Content-Length ""; - {{ end }} - {{ if not (empty $location.ExternalAuth.Method) }} - proxy_method {{ $location.ExternalAuth.Method }}; - proxy_set_header X-Original-URI $request_uri; - proxy_set_header X-Scheme $pass_access_scheme; - {{ end }} - proxy_pass_request_headers on; - proxy_set_header Host {{ $location.ExternalAuth.Host }}; - proxy_ssl_server_name on; - - client_max_body_size "{{ $location.Proxy.BodySize }}"; + {{ template "SERVER" serverConfig $all $server }} - set $target {{ $location.ExternalAuth.URL }}; - proxy_pass $target; - } - {{ end }} - - location {{ $path }} { - set $proxy_upstream_name "{{ $location.Backend }}"; - - {{ if isLocationAllowed $location }} - {{ if gt (len $location.Whitelist.CIDR) 0 }} - if ({{ buildDenyVariable (print $server.Hostname "_" $path) }}) { - return 403; - } - {{ end }} - - port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; - - {{ if not (empty $authPath) }} - # this location requires authentication - auth_request {{ $authPath }}; - {{- range $idx, $line := buildAuthResponseHeaders $location }} - {{ $line }} - {{- end }} - {{ end }} - - {{ if not (empty $location.ExternalAuth.SigninURL) }} - error_page 401 = {{ $location.ExternalAuth.SigninURL }}; - {{ end }} - - - {{/* if the location contains 
a rate limit annotation, create one */}} - {{ $limits := buildRateLimit $location }} - {{ range $limit := $limits }} - {{ $limit }}{{ end }} - - {{ if $location.BasicDigestAuth.Secured }} - {{ if eq $location.BasicDigestAuth.Type "basic" }} - auth_basic "{{ $location.BasicDigestAuth.Realm }}"; - auth_basic_user_file {{ $location.BasicDigestAuth.File }}; - {{ else }} - auth_digest "{{ $location.BasicDigestAuth.Realm }}"; - auth_digest_user_file {{ $location.BasicDigestAuth.File }}; - {{ end }} - proxy_set_header Authorization ""; - {{ end }} - - {{ if $location.EnableCORS }} - {{ template "CORS" }} - {{ end }} - - client_max_body_size "{{ $location.Proxy.BodySize }}"; - - proxy_set_header Host $best_http_host; - - # Pass the extracted client certificate to the backend - {{ if not (empty $location.CertificateAuth.AuthSSLCert.CAFileName) }} - proxy_set_header ssl-client-cert $ssl_client_cert; - {{ end }} - - # Allow websocket connections - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - - proxy_set_header X-Real-IP $the_real_ip; - proxy_set_header X-Forwarded-For $the_x_forwarded_for; - proxy_set_header X-Forwarded-Host $best_http_host; - proxy_set_header X-Forwarded-Port $pass_port; - proxy_set_header X-Forwarded-Proto $pass_access_scheme; - proxy_set_header X-Original-URI $request_uri; - proxy_set_header X-Scheme $pass_access_scheme; - - # mitigate HTTPoxy Vulnerability - # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/ - proxy_set_header Proxy ""; - - # Custom headers - {{ range $k, $v := $proxyHeaders }} - proxy_set_header {{ $k }} "{{ $v }}"; - {{ end }} - - proxy_connect_timeout {{ $location.Proxy.ConnectTimeout }}s; - proxy_send_timeout {{ $location.Proxy.SendTimeout }}s; - proxy_read_timeout {{ $location.Proxy.ReadTimeout }}s; - - proxy_redirect off; - proxy_buffering off; - proxy_buffer_size "{{ $location.Proxy.BufferSize }}"; - proxy_buffers 4 "{{ $location.Proxy.BufferSize }}"; - - proxy_http_version 1.1; - - proxy_cookie_domain {{ $location.Proxy.CookieDomain }}; - proxy_cookie_path {{ $location.Proxy.CookiePath }}; - - {{/* rewrite only works if the content is not compressed */}} - {{ if $location.Redirect.AddBaseURL }} - proxy_set_header Accept-Encoding ""; - {{ end }} - - {{/* Add any additional configuration defined */}} - {{ $location.ConfigurationSnippet }} - - {{ buildProxyPass $backends $location }} - {{ else }} - #{{ $location.Denied }} - return 503; - {{ end }} - } - {{ end }} - - {{ if eq $server.Hostname "_" }} - # health checks in cloud providers require the use of port 80 - location {{ $healthzURI }} { - access_log off; - return 200; - } - - # this is required to avoid error if nginx is being monitored - # with an external software (like sysdig) - location /nginx_status { - allow 127.0.0.1; - {{ if $IsIPV6Enabled }}allow ::1;{{ end }} - deny all; - - access_log off; - stub_status on; - } - {{ end }} - - {{ template "CUSTOM_ERRORS" $cfg }} + {{ template "CUSTOM_ERRORS" $all }} } + {{ if $server.Alias }} + server { + server_name {{ $server.Alias }}; + {{ template "SERVER" serverConfig $all $server }} + + {{ template "CUSTOM_ERRORS" $all }} + } + {{ end }} {{ end }} # default server, used for NGINX healthcheck and access to nginx stats server { - # Use the port 18080 (random value just to avoid known ports) as default port for nginx. + # Use the port {{ $all.ListenPorts.Status }} (random value just to avoid known ports) as default port for nginx. 
# Changing this value requires a change in: - # https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/nginx/command.go#L104 - listen 18080 default_server reuseport backlog={{ .BacklogSize }}; - {{ if $IsIPV6Enabled }}listen [::]:18080 default_server reuseport backlog={{ .BacklogSize }};{{ end }} + # https://github.com/kubernetes/ingress/blob/master/controllers/nginx/pkg/cmd/controller/nginx.go + listen {{ $all.ListenPorts.Status }} default_server reuseport backlog={{ $all.BacklogSize }}; + {{ if $IsIPV6Enabled }}listen [::]:{{ $all.ListenPorts.Status }} default_server reuseport backlog={{ $all.BacklogSize }};{{ end }} set $proxy_upstream_name "-"; location {{ $healthzURI }} { @@ -494,41 +392,15 @@ http { {{ end }} } - # this location is used to extract nginx metrics - # using prometheus. - # TODO: enable extraction for vts module. - location /internal_nginx_status { - set $proxy_upstream_name "internal"; - - allow 127.0.0.1; - {{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }} - deny all; - - access_log off; - stub_status on; - } - - location / { - set $proxy_upstream_name "upstream-default-backend"; - proxy_pass http://upstream-default-backend; - } - {{ template "CUSTOM_ERRORS" $cfg }} - } - - # default server for services without endpoints - server { - listen 8181; - set $proxy_upstream_name "-"; - location / { {{ if .CustomErrors }} - content_by_lua_block { - openURL(ngx.req.get_headers(0), 503) - } - {{ else }} - return 503; + proxy_set_header X-Code 404; {{ end }} + set $proxy_upstream_name "upstream-default-backend"; + proxy_pass http://upstream-default-backend; } + + {{ template "CUSTOM_ERRORS" $all }} } } @@ -538,51 +410,81 @@ stream { {{ if $cfg.DisableAccessLog }} access_log off; {{ else }} - access_log /var/log/nginx/access.log log_stream; + access_log {{ $cfg.AccessLogPath }} log_stream; {{ end }} - error_log /var/log/nginx/error.log; + error_log {{ $cfg.ErrorLogPath }}; # TCP services {{ range $i, $tcpServer := .TCPBackends }} - upstream tcp-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} { + upstream tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} { {{ range $j, $endpoint := $tcpServer.Endpoints }} server {{ $endpoint.Address }}:{{ $endpoint.Port }}; {{ end }} } - server { - listen {{ $tcpServer.Port }}; - {{ if $IsIPV6Enabled }}listen [::]:{{ $tcpServer.Port }};{{ end }} - proxy_pass tcp-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }}; + {{ range $address := $all.Cfg.BindAddressIpv4 }} + listen {{ $address }}:{{ $tcpServer.Port }}{{ if $tcpServer.Backend.UseProxyProtocol }} proxy_protocol{{ end }}; + {{ else }} + listen {{ $tcpServer.Port }}{{ if $tcpServer.Backend.UseProxyProtocol }} proxy_protocol{{ end }}; + {{ end }} + {{ if $IsIPV6Enabled }} + {{ range $address := $all.Cfg.BindAddressIpv6 }} + listen {{ $address }}:{{ $tcpServer.Port }}{{ if $tcpServer.Backend.UseProxyProtocol }} proxy_protocol{{ end }}; + {{ else }} + listen [::]:{{ $tcpServer.Port }}{{ if $tcpServer.Backend.UseProxyProtocol }} proxy_protocol{{ end }}; + {{ end }} + {{ end }} + proxy_timeout {{ $cfg.ProxyStreamTimeout }}; + proxy_pass tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }}; } + {{ end }} # UDP services {{ range $i, $udpServer := .UDPBackends }} - upstream udp-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ 
$udpServer.Backend.Port }} { + upstream udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }} { {{ range $j, $endpoint := $udpServer.Endpoints }} server {{ $endpoint.Address }}:{{ $endpoint.Port }}; {{ end }} } server { + {{ range $address := $all.Cfg.BindAddressIpv4 }} + listen {{ $address }}:{{ $udpServer.Port }} udp; + {{ else }} listen {{ $udpServer.Port }} udp; - {{ if $IsIPV6Enabled }}listen [::]:{{ $udpServer.Port }} udp;{{ end }} + {{ end }} + {{ if $IsIPV6Enabled }} + {{ range $address := $all.Cfg.BindAddressIpv6 }} + listen {{ $address }}:{{ $udpServer.Port }} udp; + {{ else }} + listen [::]:{{ $udpServer.Port }} udp; + {{ end }} + {{ end }} proxy_responses 1; - proxy_pass udp-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }}; + proxy_timeout {{ $cfg.ProxyStreamTimeout }}; + proxy_pass udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }}; } + {{ end }} } {{/* definition of templates to avoid repetitions */}} {{ define "CUSTOM_ERRORS" }} - {{ range $errCode := .CustomHTTPErrors }} + {{ $proxySetHeaders := .ProxySetHeaders }} + {{ range $errCode := .Cfg.CustomHTTPErrors }} location @custom_{{ $errCode }} { internal; - content_by_lua_block { - openURL(ngx.req.get_headers(0), {{ $errCode }}) - } + + proxy_set_header X-Code {{ $errCode }}; + proxy_set_header X-Format $http_accept; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Namespace $namespace; + proxy_set_header X-Ingress-Name $ingress_name; + proxy_set_header X-Service-Name $service_name; + + proxy_pass http://upstream-default-backend; } {{ end }} {{ end }} @@ -595,7 +497,7 @@ stream { # Om nom nom cookies # add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, OPTIONS'; + add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS'; # # Custom headers and headers various browsers *should* be OK with but aren't # @@ -621,11 +523,281 @@ stream { if ($request_method = 'DELETE') { set $cors_method 1; } + if ($request_method = 'PATCH') { + set $cors_method 1; + } if ($cors_method = 1) { add_header 'Access-Control-Allow-Origin' '*' always; add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, OPTIONS'; + add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS'; add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization'; } {{ end }} + +{{/* definition of server-template to avoid repetitions with server-alias */}} +{{ define "SERVER" }} + {{ $all := .First }} + {{ $server := .Second }} + {{ range $address := $all.Cfg.BindAddressIpv4 }} + listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}}; + {{ else }} + listen {{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}}; + {{ end }} + {{ if $all.IsIPV6Enabled }} + {{ range $address := $all.Cfg.BindAddressIpv6 }} + listen {{ $address }}:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq 
$server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{ end }}; + {{ else }} + listen [::]:{{ $all.ListenPorts.HTTP }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{ end }}; + {{ end }} + {{ end }} + set $proxy_upstream_name "-"; + + {{/* Listen on {{ $all.ListenPorts.SSLProxy }} because port {{ $all.ListenPorts.HTTPS }} is used in the TLS sni server */}} + {{/* This listener must always have proxy_protocol enabled, because the SNI listener forwards on source IP info in it. */}} + {{ if not (empty $server.SSLCertificate) }} + {{ range $address := $all.Cfg.BindAddressIpv4 }} + listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol {{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + {{ else }} + listen {{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol {{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + {{ end }} + {{ if $all.IsIPV6Enabled }} + {{ range $address := $all.Cfg.BindAddressIpv6 }} + {{ if not (empty $server.SSLCertificate) }}listen {{ $address }}:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + {{ else }} + {{ if not (empty $server.SSLCertificate) }}listen [::]:{{ if $all.IsSSLPassthroughEnabled }}{{ $all.ListenPorts.SSLProxy }} proxy_protocol{{ else }}{{ $all.ListenPorts.HTTPS }}{{ if $all.Cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $all.BacklogSize }}{{end}} ssl {{ if $all.Cfg.UseHTTP2 }}http2{{ end }}; + {{ end }} + {{ end }} + {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}} + # PEM sha: {{ $server.SSLPemChecksum }} + ssl_certificate {{ $server.SSLCertificate }}; + ssl_certificate_key {{ $server.SSLCertificate }}; + {{ end }} + + {{ if (and (not (empty $server.SSLCertificate)) $all.Cfg.HSTS) }} + more_set_headers "Strict-Transport-Security: max-age={{ $all.Cfg.HSTSMaxAge }}{{ if $all.Cfg.HSTSIncludeSubdomains }}; includeSubDomains{{ end }};{{ if $all.Cfg.HSTSPreload }} preload{{ end }}"; + {{ end }} + + {{ if $all.Cfg.EnableVtsStatus }}vhost_traffic_status_filter_by_set_key $geoip_country_code country::$server_name;{{ end }} + + {{ if not (empty $server.CertificateAuth.CAFileName) }} + # PEM sha: {{ $server.CertificateAuth.PemSHA }} + ssl_client_certificate {{ $server.CertificateAuth.CAFileName }}; + ssl_verify_client on; + ssl_verify_depth {{ $server.CertificateAuth.ValidationDepth }}; + {{ if not (empty $server.CertificateAuth.ErrorPage)}} + error_page 495 496 = {{ $server.CertificateAuth.ErrorPage }}; + {{ end }} + {{ end }} + + {{ if not (empty $server.ServerSnippet) }} + {{ $server.ServerSnippet }} + {{ end }} + + {{ range $location := 
$server.Locations }} + {{ $path := buildLocation $location }} + {{ $authPath := buildAuthLocation $location }} + + {{ if not (empty $location.Rewrite.AppRoot)}} + if ($uri = /) { + return 302 {{ $location.Rewrite.AppRoot }}; + } + {{ end }} + + {{ if not (empty $authPath) }} + location = {{ $authPath }} { + internal; + set $proxy_upstream_name "internal"; + + {{ if not $location.ExternalAuth.SendBody }} + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + {{ end }} + {{ if not (empty $location.ExternalAuth.Method) }} + proxy_method {{ $location.ExternalAuth.Method }}; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Scheme $pass_access_scheme; + {{ end }} + proxy_pass_request_headers on; + proxy_set_header Host {{ $location.ExternalAuth.Host }}; + proxy_ssl_server_name on; + + client_max_body_size "{{ $location.Proxy.BodySize }}"; + {{ if isValidClientBodyBufferSize $location.ClientBodyBufferSize }} + client_body_buffer_size {{ $location.ClientBodyBufferSize }}; + {{ end }} + + set $target {{ $location.ExternalAuth.URL }}; + proxy_pass $target; + } + + {{ end }} + + location {{ $path }} { + set $proxy_upstream_name "{{ buildUpstreamName $server.Hostname $all.Backends $location }}"; + + {{ $ing := (getIngressInformation $location.Ingress $path) }} + {{/* $ing.Metadata contains the Ingress metadata */}} + set $namespace "{{ $ing.Namespace }}"; + set $ingress_name "{{ $ing.Rule }}"; + set $service_name "{{ $ing.Service }}"; + + {{ if (or $location.Rewrite.ForceSSLRedirect (and (not (empty $server.SSLCertificate)) $location.Rewrite.SSLRedirect)) }} + # enforce ssl on server side + if ($pass_access_scheme = http) { + return 301 https://$best_http_host$request_uri; + } + {{ end }} + + {{ if isLocationAllowed $location }} + {{ if gt (len $location.Whitelist.CIDR) 0 }} + if ({{ buildDenyVariable (print $server.Hostname "_" $path) }}) { + return 403; + } + {{ end }} + + port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; + + {{ if not (empty $authPath) }} + # this location requires authentication + auth_request {{ $authPath }}; + auth_request_set $auth_cookie $upstream_http_set_cookie; + add_header Set-Cookie $auth_cookie; + {{- range $idx, $line := buildAuthResponseHeaders $location }} + {{ $line }} + {{- end }} + {{ end }} + + {{ if not (empty $location.ExternalAuth.SigninURL) }} + error_page 401 = {{ buildAuthSignURL $location.ExternalAuth.SigninURL }}; + {{ end }} + + {{/* if the location contains a rate limit annotation, create one */}} + {{ $limits := buildRateLimit $location }} + {{ range $limit := $limits }} + {{ $limit }}{{ end }} + + {{ if $location.BasicDigestAuth.Secured }} + {{ if eq $location.BasicDigestAuth.Type "basic" }} + auth_basic "{{ $location.BasicDigestAuth.Realm }}"; + auth_basic_user_file {{ $location.BasicDigestAuth.File }}; + {{ else }} + auth_digest "{{ $location.BasicDigestAuth.Realm }}"; + auth_digest_user_file {{ $location.BasicDigestAuth.File }}; + {{ end }} + proxy_set_header Authorization ""; + {{ end }} + + {{ if $location.EnableCORS }} + {{ template "CORS" }} + {{ end }} + + {{ if not (empty $location.Redirect.URL) }} + if ($uri ~* {{ $path }}) { + return {{ $location.Redirect.Code }} {{ $location.Redirect.URL }}; + } + {{ end }} + + client_max_body_size "{{ $location.Proxy.BodySize }}"; + {{ if isValidClientBodyBufferSize $location.ClientBodyBufferSize }} + client_body_buffer_size {{ $location.ClientBodyBufferSize }}; + {{ end }} + + {{/* By default use vhost as Host to upstream, but allow 
overrides */}} + {{ if not (empty $location.UpstreamVhost) }} + proxy_set_header Host "{{ $location.UpstreamVhost }}"; + {{ else }} + proxy_set_header Host $best_http_host; + {{ end }} + + + # Pass the extracted client certificate to the backend + {{ if not (empty $server.CertificateAuth.CAFileName) }} + proxy_set_header ssl-client-cert $ssl_client_cert; + {{ end }} + + # Allow websocket connections + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + + proxy_set_header X-Real-IP $the_real_ip; + proxy_set_header X-Forwarded-For $the_real_ip; + proxy_set_header X-Forwarded-Host $best_http_host; + proxy_set_header X-Forwarded-Port $pass_port; + proxy_set_header X-Forwarded-Proto $pass_access_scheme; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Scheme $pass_access_scheme; + + # mitigate HTTPoxy Vulnerability + # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/ + proxy_set_header Proxy ""; + + # Custom headers to proxied server + {{ range $k, $v := $all.ProxySetHeaders }} + proxy_set_header {{ $k }} "{{ $v }}"; + {{ end }} + + proxy_connect_timeout {{ $location.Proxy.ConnectTimeout }}s; + proxy_send_timeout {{ $location.Proxy.SendTimeout }}s; + proxy_read_timeout {{ $location.Proxy.ReadTimeout }}s; + + proxy_redirect off; + proxy_buffering off; + proxy_buffer_size "{{ $location.Proxy.BufferSize }}"; + proxy_buffers 4 "{{ $location.Proxy.BufferSize }}"; + proxy_request_buffering "{{ $location.Proxy.RequestBuffering }}"; + + proxy_http_version 1.1; + + proxy_cookie_domain {{ $location.Proxy.CookieDomain }}; + proxy_cookie_path {{ $location.Proxy.CookiePath }}; + + # In case of errors try the next upstream server before returning an error + proxy_next_upstream {{ buildNextUpstream $location.Proxy.NextUpstream }}{{ if $all.Cfg.RetryNonIdempotent }} non_idempotent{{ end }}; + + {{/* rewrite only works if the content is not compressed */}} + {{ if $location.Rewrite.AddBaseURL }} + proxy_set_header Accept-Encoding ""; + {{ end }} + + {{/* Add any additional configuration defined */}} + {{ $location.ConfigurationSnippet }} + + {{/* if we are sending the request to a custom default backend, we add the required headers */}} + {{ if (hasPrefix $location.Backend "custom-default-backend-") }} + proxy_set_header X-Code 503; + proxy_set_header X-Format $http_accept; + proxy_set_header X-Namespace $namespace; + proxy_set_header X-Ingress-Name $ingress_name; + proxy_set_header X-Service-Name $service_name; + {{ end }} + + {{ buildProxyPass $server.Hostname $all.Backends $location }} + {{ else }} + # Location denied. 
Reason: {{ $location.Denied }} + return 503; + {{ end }} + } + {{ end }} + + {{ if eq $server.Hostname "_" }} + # health checks in cloud providers require the use of port {{ $all.ListenPorts.HTTP }} + location {{ $all.HealthzURI }} { + access_log off; + return 200; + } + + # this is required to avoid error if nginx is being monitored + # with an external software (like sysdig) + location /nginx_status { + allow 127.0.0.1; + {{ if $all.IsIPV6Enabled }}allow ::1;{{ end }} + deny all; + + access_log off; + stub_status on; + } + + {{ end }} + +{{ end }} diff --git a/controllers/nginx/rootfs/ingress-controller/clean-nginx-conf.sh b/controllers/nginx/rootfs/ingress-controller/clean-nginx-conf.sh index 53662152f..0e109b7df 100755 --- a/controllers/nginx/rootfs/ingress-controller/clean-nginx-conf.sh +++ b/controllers/nginx/rootfs/ingress-controller/clean-nginx-conf.sh @@ -5,4 +5,4 @@ # first sed removes empty lines # second sed command replaces the empty lines -sed -e 's/^ *$/\'$'\n/g' | sed -e '/^$/{N;/^\n$/d;}' +sed -e 's/\r//g' | sed -e 's/^ *$/\'$'\n/g' | sed -e '/^$/{N;/^\n$/d;}' diff --git a/controllers/nginx/test/data/config.json b/controllers/nginx/test/data/config.json index 087a8fe83..c339b7759 100644 --- a/controllers/nginx/test/data/config.json +++ b/controllers/nginx/test/data/config.json @@ -38,8 +38,10 @@ "sslSessionTimeout": "10m", "useGzip": true, "useHttp2": true, + "proxyStreamTimeout": "600s", "vtsStatusZoneSize": "10m", - "workerProcesses": 1 + "workerProcesses": 1, + "limitConnZoneVariable": "$the_real_ip" }, "customErrors": true, "defResolver": "", @@ -94,7 +96,7 @@ "sslRedirect": true }, "whitelist": { - "cidr": [] + "cidr": ["1.1.1.1"] }, "proxy": { "conectTimeout": 5, @@ -144,7 +146,7 @@ "sslRedirect": false }, "whitelist": { - "cidr": null + "cidr": ["1.1.1.1"] }, "proxy": { "conectTimeout": 5, diff --git a/core/pkg/base64/base64.go b/core/pkg/base64/base64.go new file mode 100644 index 000000000..6c4480148 --- /dev/null +++ b/core/pkg/base64/base64.go @@ -0,0 +1,12 @@ +package base64 + +import ( + "encoding/base64" + "strings" +) + +// Encode encodes a string to base64 removing the equals character +func Encode(s string) string { + str := base64.URLEncoding.EncodeToString([]byte(s)) + return strings.Replace(str, "=", "", -1) +} diff --git a/core/pkg/file/file.go b/core/pkg/file/file.go new file mode 100644 index 000000000..17d194aa6 --- /dev/null +++ b/core/pkg/file/file.go @@ -0,0 +1,19 @@ +package file + +import ( + "crypto/sha1" + "encoding/hex" + "io/ioutil" +) + +// SHA1 returns the SHA1 of a file. +func SHA1(filename string) string { + hasher := sha1.New() + s, err := ioutil.ReadFile(filename) + if err != nil { + return "" + } + + hasher.Write(s) + return hex.EncodeToString(hasher.Sum(nil)) +} diff --git a/core/pkg/ingress/annotations/alias/main.go b/core/pkg/ingress/annotations/alias/main.go new file mode 100644 index 000000000..3f22f41e8 --- /dev/null +++ b/core/pkg/ingress/annotations/alias/main.go @@ -0,0 +1,41 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alias + +import ( + extensions "k8s.io/api/extensions/v1beta1" + + "k8s.io/ingress/core/pkg/ingress/annotations/parser" +) + +const ( + annotation = "ingress.kubernetes.io/server-alias" +) + +type alias struct { +} + +// NewParser creates a new Alias annotation parser +func NewParser() parser.IngressAnnotation { + return alias{} +} + +// Parse parses the annotations contained in the ingress rule +// used to add an alias to the provided hosts +func (a alias) Parse(ing *extensions.Ingress) (interface{}, error) { + return parser.GetStringAnnotation(annotation, ing) +} diff --git a/core/pkg/ingress/annotations/alias/main_test.go b/core/pkg/ingress/annotations/alias/main_test.go new file mode 100644 index 000000000..de4fe17f5 --- /dev/null +++ b/core/pkg/ingress/annotations/alias/main_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alias + +import ( + "testing" + + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestParse(t *testing.T) { + ap := NewParser() + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected string + }{ + {map[string]string{annotation: "www.example.com"}, "www.example.com"}, + {map[string]string{annotation: "*.example.com www.example.*"}, "*.example.com www.example.*"}, + {map[string]string{annotation: `~^www\d+\.example\.com$`}, `~^www\d+\.example\.com$`}, + {map[string]string{annotation: ""}, ""}, + {map[string]string{}, ""}, + {nil, ""}, + } + + ing := &extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + if result != testCase.expected { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/core/pkg/ingress/annotations/auth/main.go b/core/pkg/ingress/annotations/auth/main.go index 5e8796e65..34e056c52 100644 --- a/core/pkg/ingress/annotations/auth/main.go +++ b/core/pkg/ingress/annotations/auth/main.go @@ -24,9 +24,10 @@ import ( "regexp" "github.com/pkg/errors" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/ingress/core/pkg/file" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" "k8s.io/ingress/core/pkg/ingress/resolver" @@ -51,6 +52,34 @@ type BasicDigest struct { Realm string `json:"realm"` File string `json:"file"` Secured bool `json:"secured"` + FileSHA string `json:"fileSha"` +} + +// Equal tests for equality between two BasicDigest types +func 
(bd1 *BasicDigest) Equal(bd2 *BasicDigest) bool { + if bd1 == bd2 { + return true + } + if bd1 == nil || bd2 == nil { + return false + } + if bd1.Type != bd2.Type { + return false + } + if bd1.Realm != bd2.Realm { + return false + } + if bd1.File != bd2.File { + return false + } + if bd1.Secured != bd2.Secured { + return false + } + if bd1.FileSHA != bd2.FileSHA { + return false + } + + return true } type auth struct { @@ -116,6 +145,7 @@ func (a auth) Parse(ing *extensions.Ingress) (interface{}, error) { Realm: realm, File: passFile, Secured: true, + FileSHA: file.SHA1(passFile), }, nil } diff --git a/core/pkg/ingress/annotations/auth/main_test.go b/core/pkg/ingress/annotations/auth/main_test.go index aec7549b3..df042ad4f 100644 --- a/core/pkg/ingress/annotations/auth/main_test.go +++ b/core/pkg/ingress/annotations/auth/main_test.go @@ -25,10 +25,10 @@ import ( "github.com/pkg/errors" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { diff --git a/core/pkg/ingress/annotations/authreq/main.go b/core/pkg/ingress/annotations/authreq/main.go index 8b156766e..4c46dd4d5 100644 --- a/core/pkg/ingress/annotations/authreq/main.go +++ b/core/pkg/ingress/annotations/authreq/main.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" @@ -44,7 +44,50 @@ type External struct { SigninURL string `json:"signinUrl"` Method string `json:"method"` SendBody bool `json:"sendBody"` - ResponseHeaders []string `json:"responseHeaders"` + ResponseHeaders []string `json:"responseHeaders,omitempty"` +} + +// Equal tests for equality between two External types +func (e1 *External) Equal(e2 *External) bool { + if e1 == e2 { + return true + } + if e1 == nil || e2 == nil { + return false + } + if e1.URL != e2.URL { + return false + } + if e1.Host != e2.Host { + return false + } + if e1.SigninURL != e2.SigninURL { + return false + } + if e1.Method != e2.Method { + return false + } + if e1.SendBody != e2.SendBody { + return false + } + if len(e1.ResponseHeaders) != len(e2.ResponseHeaders) { + return false + } + + for _, ep1 := range e1.ResponseHeaders { + found := false + for _, ep2 := range e2.ResponseHeaders { + if ep1 == ep2 { + found = true + break + } + } + if !found { + return false + } + } + + return true } var ( @@ -117,7 +160,7 @@ func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) { harr := strings.Split(hstr, ",") for _, header := range harr { - header := strings.TrimSpace(header) + header = strings.TrimSpace(header) if len(header) > 0 { if !validHeader(header) { return nil, ing_errors.NewLocationDenied("invalid headers list") diff --git a/core/pkg/ingress/annotations/authreq/main_test.go b/core/pkg/ingress/annotations/authreq/main_test.go index 12df0d93b..de9249443 100644 --- a/core/pkg/ingress/annotations/authreq/main_test.go +++ b/core/pkg/ingress/annotations/authreq/main_test.go @@ -21,9 +21,9 @@ import ( "reflect" "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/apimachinery/pkg/util/intstr" )
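Each annotation configuration type touched by this patch (BasicDigest and External above, AuthSSLConfig, SourceRange, the proxy Configuration, RateLimit and Zone below) gains an Equal method with the same shape: short-circuit on identical or nil pointers, compare the scalar fields one by one, then compare any slice field with a length check followed by an order-insensitive containment scan. A minimal standalone sketch of that slice comparison, runnable on its own (the helper name unorderedEqual is illustrative and not part of the patch):

    package main

    import "fmt"

    // unorderedEqual reports whether a and b hold the same strings
    // regardless of order. It mirrors the length check plus nested
    // containment loop used by the Equal methods in this patch; like
    // them, it is only exact when elements are not duplicated.
    func unorderedEqual(a, b []string) bool {
    	if len(a) != len(b) {
    		return false
    	}
    	for _, x := range a {
    		found := false
    		for _, y := range b {
    			if x == y {
    				found = true
    				break
    			}
    		}
    		if !found {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(unorderedEqual([]string{"X-Auth", "X-User"}, []string{"X-User", "X-Auth"})) // true
    	fmt.Println(unorderedEqual([]string{"X-Auth"}, []string{"X-User"}))                     // false
    }

These hand-written comparisons presumably let the controller check a freshly generated configuration against the running one field by field instead of relying on reflect.DeepEqual; note the new FileSHA field above, which makes edits to the htpasswd file content visible to such a comparison.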
diff --git a/core/pkg/ingress/annotations/authtls/main.go b/core/pkg/ingress/annotations/authtls/main.go index 2de8b22c0..30a1a04fc 100644 --- a/core/pkg/ingress/annotations/authtls/main.go +++ b/core/pkg/ingress/annotations/authtls/main.go @@ -18,7 +18,7 @@ package authtls import ( "github.com/pkg/errors" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" @@ -28,16 +28,38 @@ import ( const ( // name of the secret - annotationAuthTLSSecret = "ingress.kubernetes.io/auth-tls-secret" - annotationAuthTLSDepth = "ingress.kubernetes.io/auth-tls-verify-depth" - defaultAuthTLSDepth = 1 + annotationAuthTLSSecret = "ingress.kubernetes.io/auth-tls-secret" + annotationAuthTLSDepth = "ingress.kubernetes.io/auth-tls-verify-depth" + annotationAuthTLSErrorPage = "ingress.kubernetes.io/auth-tls-error-page" + defaultAuthTLSDepth = 1 ) // AuthSSLConfig contains the AuthSSLCert used for mutual authentication // and the configured ValidationDepth type AuthSSLConfig struct { - AuthSSLCert resolver.AuthSSLCert - ValidationDepth int `json:"validationDepth"` + resolver.AuthSSLCert + ValidationDepth int `json:"validationDepth"` + ErrorPage string `json:"errorPage"` +} + +// Equal tests for equality between two AuthSSLConfig types +func (assl1 *AuthSSLConfig) Equal(assl2 *AuthSSLConfig) bool { + if assl1 == assl2 { + return true + } + if assl1 == nil || assl2 == nil { + return false + } + if !(&assl1.AuthSSLCert).Equal(&assl2.AuthSSLCert) { + return false + } + if assl1.ValidationDepth != assl2.ValidationDepth { + return false + } + if assl1.ErrorPage != assl2.ErrorPage { + return false + } + return true } // NewParser creates a new TLS authentication annotation parser @@ -64,7 +86,7 @@ func (a authTLS) Parse(ing *extensions.Ingress) (interface{}, error) { _, _, err = k8s.ParseNameNS(tlsauthsecret) if err != nil { - return &AuthSSLConfig{}, ing_errors.NewLocationDenied("an empty string is not a valid secret name") + return &AuthSSLConfig{}, ing_errors.NewLocationDenied(err.Error()) } tlsdepth, err := parser.GetIntAnnotation(annotationAuthTLSDepth, ing) @@ -79,8 +101,14 @@ func (a authTLS) Parse(ing *extensions.Ingress) (interface{}, error) { } } + errorpage, err := parser.GetStringAnnotation(annotationAuthTLSErrorPage, ing) + if err != nil || errorpage == "" { + errorpage = "" + } + return &AuthSSLConfig{ AuthSSLCert: *authCert, ValidationDepth: tlsdepth, + ErrorPage: errorpage, }, nil } diff --git a/core/pkg/ingress/annotations/authtls/main_test.go b/core/pkg/ingress/annotations/authtls/main_test.go index 50057503b..a3613bb15 100644 --- a/core/pkg/ingress/annotations/authtls/main_test.go +++ b/core/pkg/ingress/annotations/authtls/main_test.go @@ -19,10 +19,10 @@ package authtls import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { diff --git a/core/pkg/ingress/annotations/class/main.go b/core/pkg/ingress/annotations/class/main.go index 1c1066e16..2ff73914c 100644 --- a/core/pkg/ingress/annotations/class/main.go +++ b/core/pkg/ingress/annotations/class/main.go @@ -18,7 +18,7 @@ package class import ( "github.com/golang/glog" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions
"k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/errors" diff --git a/core/pkg/ingress/annotations/class/main_test.go b/core/pkg/ingress/annotations/class/main_test.go index f48525078..45f7c02e7 100644 --- a/core/pkg/ingress/annotations/class/main_test.go +++ b/core/pkg/ingress/annotations/class/main_test.go @@ -19,9 +19,9 @@ package class import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func TestIsValidClass(t *testing.T) { diff --git a/core/pkg/ingress/annotations/clientbodybuffersize/main.go b/core/pkg/ingress/annotations/clientbodybuffersize/main.go new file mode 100644 index 000000000..bf67f819f --- /dev/null +++ b/core/pkg/ingress/annotations/clientbodybuffersize/main.go @@ -0,0 +1,41 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientbodybuffersize + +import ( + extensions "k8s.io/api/extensions/v1beta1" + + "k8s.io/ingress/core/pkg/ingress/annotations/parser" +) + +const ( + annotation = "ingress.kubernetes.io/client-body-buffer-size" +) + +type clientBodyBufferSize struct { +} + +// NewParser creates a new clientBodyBufferSize annotation parser +func NewParser() parser.IngressAnnotation { + return clientBodyBufferSize{} +} + +// Parse parses the annotations contained in the ingress rule +// used to add an client-body-buffer-size to the provided locations +func (a clientBodyBufferSize) Parse(ing *extensions.Ingress) (interface{}, error) { + return parser.GetStringAnnotation(annotation, ing) +} diff --git a/core/pkg/ingress/annotations/clientbodybuffersize/main_test.go b/core/pkg/ingress/annotations/clientbodybuffersize/main_test.go new file mode 100644 index 000000000..8ed6e0c38 --- /dev/null +++ b/core/pkg/ingress/annotations/clientbodybuffersize/main_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clientbodybuffersize + +import ( + "testing" + + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestParse(t *testing.T) { + ap := NewParser() + if ap == nil { + t.Fatalf("expected a parser.IngressAnnotation but returned nil") + } + + testCases := []struct { + annotations map[string]string + expected string + }{ + {map[string]string{annotation: "8k"}, "8k"}, + {map[string]string{annotation: "16k"}, "16k"}, + {map[string]string{annotation: ""}, ""}, + {map[string]string{}, ""}, + {nil, ""}, + } + + ing := &extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{}, + } + + for _, testCase := range testCases { + ing.SetAnnotations(testCase.annotations) + result, _ := ap.Parse(ing) + if result != testCase.expected { + t.Errorf("expected %v but returned %v, annotations: %s", testCase.expected, result, testCase.annotations) + } + } +} diff --git a/core/pkg/ingress/annotations/cors/main.go b/core/pkg/ingress/annotations/cors/main.go index c460f8215..37aca7986 100644 --- a/core/pkg/ingress/annotations/cors/main.go +++ b/core/pkg/ingress/annotations/cors/main.go @@ -17,7 +17,7 @@ limitations under the License. package cors import ( - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) diff --git a/core/pkg/ingress/annotations/cors/main_test.go b/core/pkg/ingress/annotations/cors/main_test.go index 480356ca8..330b059f4 100644 --- a/core/pkg/ingress/annotations/cors/main_test.go +++ b/core/pkg/ingress/annotations/cors/main_test.go @@ -19,9 +19,9 @@ package cors import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) const ( diff --git a/core/pkg/ingress/annotations/defaultbackend/main.go b/core/pkg/ingress/annotations/defaultbackend/main.go new file mode 100644 index 000000000..f9bb440c2 --- /dev/null +++ b/core/pkg/ingress/annotations/defaultbackend/main.go @@ -0,0 +1,57 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package defaultbackend + +import ( + "fmt" + + "github.com/pkg/errors" + extensions "k8s.io/api/extensions/v1beta1" + + "k8s.io/ingress/core/pkg/ingress/annotations/parser" + "k8s.io/ingress/core/pkg/ingress/resolver" +) + +const ( + defaultBackend = "ingress.kubernetes.io/default-backend" +) + +type backend struct { + serviceResolver resolver.Service +} + +// NewParser creates a new default backend annotation parser +func NewParser(sr resolver.Service) parser.IngressAnnotation { + return backend{sr} +} + +// Parse parses the annotations contained in the ingress to use +// a custom default backend +func (db backend) Parse(ing *extensions.Ingress) (interface{}, error) { + s, err := parser.GetStringAnnotation(defaultBackend, ing) + if err != nil { + return nil, err + } + + name := fmt.Sprintf("%v/%v", ing.Namespace, s) + svc, err := db.serviceResolver.GetService(name) + if err != nil { + return nil, errors.Wrapf(err, "unexpected error reading service %v", name) + } + + return svc, nil +} diff --git a/core/pkg/ingress/annotations/healthcheck/main.go b/core/pkg/ingress/annotations/healthcheck/main.go index 973e911dc..7b63f068d 100644 --- a/core/pkg/ingress/annotations/healthcheck/main.go +++ b/core/pkg/ingress/annotations/healthcheck/main.go @@ -17,7 +17,7 @@ limitations under the License. package healthcheck import ( - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" diff --git a/core/pkg/ingress/annotations/healthcheck/main_test.go b/core/pkg/ingress/annotations/healthcheck/main_test.go index 09215b17b..14786bacc 100644 --- a/core/pkg/ingress/annotations/healthcheck/main_test.go +++ b/core/pkg/ingress/annotations/healthcheck/main_test.go @@ -19,10 +19,10 @@ package healthcheck import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" ) diff --git a/core/pkg/ingress/annotations/ipwhitelist/main.go b/core/pkg/ingress/annotations/ipwhitelist/main.go index 6f394360e..e775abe3a 100644 --- a/core/pkg/ingress/annotations/ipwhitelist/main.go +++ b/core/pkg/ingress/annotations/ipwhitelist/main.go @@ -22,8 +22,8 @@ import ( "github.com/pkg/errors" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" - "k8s.io/kubernetes/pkg/util/net/sets" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/ingress/core/pkg/net" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" @@ -36,7 +36,36 @@ const ( // SourceRange returns the CIDR type SourceRange struct { - CIDR []string `json:"cidr"` + CIDR []string `json:"cidr,omitempty"` +} + +// Equal tests for equality between two SourceRange types +func (sr1 *SourceRange) Equal(sr2 *SourceRange) bool { + if sr1 == sr2 { + return true + } + if sr1 == nil || sr2 == nil { + return false + } + + if len(sr1.CIDR) != len(sr2.CIDR) { + return false + } + + for _, s1l := range sr1.CIDR { + found := false + for _, sl2 := range sr2.CIDR { + if s1l == sl2 { + found = true + break + } + } + if !found { + return false + } + } + + return true } type ipwhitelist struct { @@ -63,8 +92,8 @@ func (a ipwhitelist) Parse(ing *extensions.Ingress) (interface{}, error) { } values := strings.Split(val, ",") - ipnets, err :=
sets.ParseIPNets(values...) - if err != nil { + ipnets, ips, err := net.ParseIPNets(values...) + if err != nil && len(ips) == 0 { return &SourceRange{CIDR: defBackend.WhitelistSourceRange}, ing_errors.LocationDenied{ Reason: errors.Wrap(err, "the annotation does not contain a valid IP address or network"), } @@ -74,6 +103,9 @@ func (a ipwhitelist) Parse(ing *extensions.Ingress) (interface{}, error) { for k := range ipnets { cidrs = append(cidrs, k) } + for k := range ips { + cidrs = append(cidrs, k) + } sort.Strings(cidrs) diff --git a/core/pkg/ingress/annotations/ipwhitelist/main_test.go b/core/pkg/ingress/annotations/ipwhitelist/main_test.go index ba2c946a1..b5ab5ed2f 100644 --- a/core/pkg/ingress/annotations/ipwhitelist/main_test.go +++ b/core/pkg/ingress/annotations/ipwhitelist/main_test.go @@ -17,16 +17,14 @@ limitations under the License. package ipwhitelist import ( - "reflect" "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" - "k8s.io/ingress/core/pkg/ingress/errors" ) func buildIngress() *extensions.Ingress { @@ -73,157 +71,118 @@ func (m mockBackend) GetDefaultBackend() defaults.Backend { } func TestParseAnnotations(t *testing.T) { - // TODO: convert test cases to tables ing := buildIngress() - - testNet := "10.0.0.0/24" - enet := []string{testNet} - - data := map[string]string{} - data[whitelist] = testNet - ing.SetAnnotations(data) - - expected := &SourceRange{ - CIDR: enet, + tests := map[string]struct { + net string + expectCidr []string + expectErr bool + errOut string + }{ + "test parse a valid net": { + net: "10.0.0.0/24", + expectCidr: []string{"10.0.0.0/24"}, + expectErr: false, + }, + "test parse an invalid net": { + net: "ww", + expectErr: true, + errOut: "the annotation does not contain a valid IP address or network: invalid CIDR address: ww", + }, + "test parse an empty net": { + net: "", + expectErr: true, + errOut: "the annotation does not contain a valid IP address or network: invalid CIDR address: ", + }, + "test parse multiple valid cidr": { + net: "2.2.2.2/32,1.1.1.1/32,3.3.3.0/24", + expectCidr: []string{"1.1.1.1/32", "2.2.2.2/32", "3.3.3.0/24"}, + expectErr: false, + }, } - p := NewParser(mockBackend{}) - - i, err := p.Parse(ing) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - sr, ok := i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - - if !reflect.DeepEqual(sr, expected) { - t.Errorf("expected %v but returned %s", sr, expected) - } - - data[whitelist] = "www" - _, err = p.Parse(ing) - if err == nil { - t.Errorf("expected error parsing an invalid cidr") - } - - if !errors.IsLocationDenied(err) { - t.Errorf("expected LocationDenied error: %+v", err) - } - - delete(data, whitelist) - i, err = p.Parse(ing) - - if err != nil { - t.Errorf("unexpected error when no annotation present: %v", err) - } - - sr, ok = i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - if !strsEquals(sr.CIDR, []string{}) { - t.Errorf("expected empty CIDR but %v returned", sr.CIDR) - } - - i, _ = p.Parse(&extensions.Ingress{}) - sr, ok = i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - if !strsEquals(sr.CIDR, []string{}) { - t.Errorf("expected empty CIDR but %v returned", sr.CIDR) - } - - data[whitelist] = "2.2.2.2/32,1.1.1.1/32,3.3.3.0/24" - i, _
= p.Parse(ing) - sr, ok = i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - ecidr := []string{"1.1.1.1/32", "2.2.2.2/32", "3.3.3.0/24"} - if !strsEquals(sr.CIDR, ecidr) { - t.Errorf("Expected %v CIDR but %v returned", ecidr, sr.CIDR) + for testName, test := range tests { + data := map[string]string{} + data[whitelist] = test.net + ing.SetAnnotations(data) + p := NewParser(mockBackend{}) + i, err := p.Parse(ing) + if err != nil && !test.expectErr { + t.Errorf("%v:unexpected error: %v", testName, err) + } + if test.expectErr { + if err.Error() != test.errOut { + t.Errorf("%v:expected error: %v but %v returned", testName, test.errOut, err.Error()) + } + } + if !test.expectErr { + sr, ok := i.(*SourceRange) + if !ok { + t.Errorf("%v:expected a SourceRange type", testName) + } + if !strsEquals(sr.CIDR, test.expectCidr) { + t.Errorf("%v:expected %v CIDR but %v returned", testName, test.expectCidr, sr.CIDR) + } + } } } // Test that when we have a whitelist set on the Backend that is used when we // don't have the annotation func TestParseAnnotationsWithDefaultConfig(t *testing.T) { - // TODO: convert test cases to tables ing := buildIngress() - mockBackend := mockBackend{} mockBackend.Backend.WhitelistSourceRange = []string{"4.4.4.0/24", "1.2.3.4/32"} - testNet := "10.0.0.0/24" - enet := []string{testNet} - - data := map[string]string{} - data[whitelist] = testNet - ing.SetAnnotations(data) - - expected := &SourceRange{ - CIDR: enet, + tests := map[string]struct { + net string + expectCidr []string + expectErr bool + errOut string + }{ + "test parse a valid net": { + net: "10.0.0.0/24", + expectCidr: []string{"10.0.0.0/24"}, + expectErr: false, + }, + "test parse an invalid net": { + net: "ww", + expectErr: true, + errOut: "the annotation does not contain a valid IP address or network: invalid CIDR address: ww", + }, + "test parse an empty net": { + net: "", + expectErr: true, + errOut: "the annotation does not contain a valid IP address or network: invalid CIDR address: ", + }, + "test parse multiple valid cidr": { + net: "2.2.2.2/32,1.1.1.1/32,3.3.3.0/24", + expectCidr: []string{"1.1.1.1/32", "2.2.2.2/32", "3.3.3.0/24"}, + expectErr: false, + }, } - p := NewParser(mockBackend) - - i, err := p.Parse(ing) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - sr, ok := i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - - if !reflect.DeepEqual(sr, expected) { - t.Errorf("expected %v but returned %s", sr, expected) - } - - data[whitelist] = "www" - _, err = p.Parse(ing) - if err == nil { - t.Errorf("expected error parsing an invalid cidr") - } - if !errors.IsLocationDenied(err) { - t.Errorf("expected LocationDenied error: %+v", err) - } - - delete(data, whitelist) - i, err = p.Parse(ing) - - if err != nil { - t.Errorf("unexpected error when no annotation present: %v", err) - } - - sr, ok = i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - if !strsEquals(sr.CIDR, mockBackend.WhitelistSourceRange) { - t.Errorf("expected fallback CIDR but %v returned", sr.CIDR) - } - - i, _ = p.Parse(&extensions.Ingress{}) - sr, ok = i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - if !strsEquals(sr.CIDR, mockBackend.WhitelistSourceRange) { - t.Errorf("expected fallback CIDR but %v returned", sr.CIDR) - } - - data[whitelist] = "2.2.2.2/32,1.1.1.1/32,3.3.3.0/24" - i, _ = p.Parse(ing) - sr, ok = i.(*SourceRange) - if !ok { - t.Errorf("expected a SourceRange type") - } - ecidr := []string{"1.1.1.1/32",
"2.2.2.2/32", "3.3.3.0/24"} - if !strsEquals(sr.CIDR, ecidr) { - t.Errorf("Expected %v CIDR but %v returned", ecidr, sr.CIDR) + for testName, test := range tests { + data := map[string]string{} + data[whitelist] = test.net + ing.SetAnnotations(data) + p := NewParser(mockBackend) + i, err := p.Parse(ing) + if err != nil && !test.expectErr { + t.Errorf("%v:unexpected error: %v", testName, err) + } + if test.expectErr { + if err.Error() != test.errOut { + t.Errorf("%v:expected error: %v but %v return", testName, test.errOut, err.Error()) + } + } + if !test.expectErr { + sr, ok := i.(*SourceRange) + if !ok { + t.Errorf("%v:expected a SourceRange type", testName) + } + if !strsEquals(sr.CIDR, test.expectCidr) { + t.Errorf("%v:expected %v CIDR but %v returned", testName, test.expectCidr, sr.CIDR) + } + } } } diff --git a/core/pkg/ingress/annotations/parser/main.go b/core/pkg/ingress/annotations/parser/main.go index a35c80a38..929ff657f 100644 --- a/core/pkg/ingress/annotations/parser/main.go +++ b/core/pkg/ingress/annotations/parser/main.go @@ -19,7 +19,7 @@ package parser import ( "strconv" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/errors" ) diff --git a/core/pkg/ingress/annotations/parser/main_test.go b/core/pkg/ingress/annotations/parser/main_test.go index 8dd2db1b5..4bcc3188e 100644 --- a/core/pkg/ingress/annotations/parser/main_test.go +++ b/core/pkg/ingress/annotations/parser/main_test.go @@ -19,9 +19,9 @@ package parser import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { diff --git a/core/pkg/ingress/annotations/portinredirect/main.go b/core/pkg/ingress/annotations/portinredirect/main.go index 2f264f32d..3854f7602 100644 --- a/core/pkg/ingress/annotations/portinredirect/main.go +++ b/core/pkg/ingress/annotations/portinredirect/main.go @@ -17,7 +17,7 @@ limitations under the License. package portinredirect import ( - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" diff --git a/core/pkg/ingress/annotations/portinredirect/main_test.go b/core/pkg/ingress/annotations/portinredirect/main_test.go index 46fe8ad92..4c43e2895 100644 --- a/core/pkg/ingress/annotations/portinredirect/main_test.go +++ b/core/pkg/ingress/annotations/portinredirect/main_test.go @@ -19,10 +19,10 @@ package portinredirect import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "fmt" diff --git a/core/pkg/ingress/annotations/proxy/main.go b/core/pkg/ingress/annotations/proxy/main.go index e097edd24..e31fb4ba7 100644 --- a/core/pkg/ingress/annotations/proxy/main.go +++ b/core/pkg/ingress/annotations/proxy/main.go @@ -17,31 +17,80 @@ limitations under the License. 
package proxy import ( - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" ) const ( - bodySize = "ingress.kubernetes.io/proxy-body-size" - connect = "ingress.kubernetes.io/proxy-connect-timeout" - send = "ingress.kubernetes.io/proxy-send-timeout" - read = "ingress.kubernetes.io/proxy-read-timeout" - bufferSize = "ingress.kubernetes.io/proxy-buffer-size" - cookiePath = "ingress.kubernetes.io/proxy-cookie-path" - cookieDomain = "ingress.kubernetes.io/proxy-cookie-domain" + bodySize = "ingress.kubernetes.io/proxy-body-size" + connect = "ingress.kubernetes.io/proxy-connect-timeout" + send = "ingress.kubernetes.io/proxy-send-timeout" + read = "ingress.kubernetes.io/proxy-read-timeout" + bufferSize = "ingress.kubernetes.io/proxy-buffer-size" + cookiePath = "ingress.kubernetes.io/proxy-cookie-path" + cookieDomain = "ingress.kubernetes.io/proxy-cookie-domain" + nextUpstream = "ingress.kubernetes.io/proxy-next-upstream" + passParams = "ingress.kubernetes.io/proxy-pass-params" + requestBuffering = "ingress.kubernetes.io/proxy-request-buffering" ) // Configuration returns the proxy timeout to use in the upstream server/s type Configuration struct { - BodySize string `json:"bodySize"` - ConnectTimeout int `json:"conectTimeout"` - SendTimeout int `json:"sendTimeout"` - ReadTimeout int `json:"readTimeout"` - BufferSize string `json:"bufferSize"` - CookieDomain string `json:"cookieDomain"` - CookiePath string `json:"cookiePath"` + BodySize string `json:"bodySize"` + ConnectTimeout int `json:"connectTimeout"` + SendTimeout int `json:"sendTimeout"` + ReadTimeout int `json:"readTimeout"` + BufferSize string `json:"bufferSize"` + CookieDomain string `json:"cookieDomain"` + CookiePath string `json:"cookiePath"` + NextUpstream string `json:"nextUpstream"` + PassParams string `json:"passParams"` + RequestBuffering string `json:"requestBuffering"` +} + +// Equal tests for equality between two Configuration types +func (l1 *Configuration) Equal(l2 *Configuration) bool { + if l1 == l2 { + return true + } + if l1 == nil || l2 == nil { + return false + } + if l1.BodySize != l2.BodySize { + return false + } + if l1.ConnectTimeout != l2.ConnectTimeout { + return false + } + if l1.SendTimeout != l2.SendTimeout { + return false + } + if l1.ReadTimeout != l2.ReadTimeout { + return false + } + if l1.BufferSize != l2.BufferSize { + return false + } + if l1.CookieDomain != l2.CookieDomain { + return false + } + if l1.CookiePath != l2.CookiePath { + return false + } + if l1.NextUpstream != l2.NextUpstream { + return false + } + if l1.PassParams != l2.PassParams { + return false + } + + if l1.RequestBuffering != l2.RequestBuffering { + return false + } + + return true } type proxy struct { @@ -92,5 +141,20 @@ func (a proxy) Parse(ing *extensions.Ingress) (interface{}, error) { bs = defBackend.ProxyBodySize } - return &Configuration{bs, ct, st, rt, bufs, cd, cp}, nil + nu, err := parser.GetStringAnnotation(nextUpstream, ing) + if err != nil || nu == "" { + nu = defBackend.ProxyNextUpstream + } + + pp, err := parser.GetStringAnnotation(passParams, ing) + if err != nil || pp == "" { + pp = defBackend.ProxyPassParams + } + + rb, err := parser.GetStringAnnotation(requestBuffering, ing) + if err != nil || rb == "" { + rb = defBackend.ProxyRequestBuffering + } + + return &Configuration{bs, ct, st, rt, bufs, cd, cp, nu, pp, rb}, nil } diff --git 
a/core/pkg/ingress/annotations/proxy/main_test.go b/core/pkg/ingress/annotations/proxy/main_test.go index 11e85dc30..8700457f1 100644 --- a/core/pkg/ingress/annotations/proxy/main_test.go +++ b/core/pkg/ingress/annotations/proxy/main_test.go @@ -19,10 +19,10 @@ package proxy import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" ) @@ -73,6 +73,9 @@ func (m mockBackend) GetDefaultBackend() defaults.Backend { ProxyReadTimeout: 20, ProxyBufferSize: "10k", ProxyBodySize: "3k", + ProxyNextUpstream: "error", + ProxyPassParams: "nocanon keepalive=On", + ProxyRequestBuffering: "on", } } @@ -85,6 +88,9 @@ func TestProxy(t *testing.T) { data[read] = "3" data[bufferSize] = "1k" data[bodySize] = "2k" + data[nextUpstream] = "off" + data[passParams] = "smax=5 max=10" + data[requestBuffering] = "off" ing.SetAnnotations(data) i, err := NewParser(mockBackend{}).Parse(ing) @@ -110,6 +116,15 @@ func TestProxy(t *testing.T) { if p.BodySize != "2k" { t.Errorf("expected 2k as body-size but returned %v", p.BodySize) } + if p.NextUpstream != "off" { + t.Errorf("expected off as next-upstream but returned %v", p.NextUpstream) + } + if p.PassParams != "smax=5 max=10" { + t.Errorf("expected \"smax=5 max=10\" as pass-params but returned \"%v\"", p.PassParams) + } + if p.RequestBuffering != "off" { + t.Errorf("expected off as request-buffering but returned %v", p.RequestBuffering) + } } func TestProxyWithNoAnnotation(t *testing.T) { @@ -141,4 +156,13 @@ func TestProxyWithNoAnnotation(t *testing.T) { if p.BodySize != "3k" { t.Errorf("expected 3k as body-size but returned %v", p.BodySize) } + if p.NextUpstream != "error" { + t.Errorf("expected error as next-upstream but returned %v", p.NextUpstream) + } + if p.PassParams != "nocanon keepalive=On" { + t.Errorf("expected \"nocanon keepalive=On\" as pass-params but returned \"%v\"", p.PassParams) + } + if p.RequestBuffering != "on" { + t.Errorf("expected on as request-buffering but returned %v", p.RequestBuffering) + } } diff --git a/core/pkg/ingress/annotations/ratelimit/main.go b/core/pkg/ingress/annotations/ratelimit/main.go index 06a0160b7..1c173ec48 100644 --- a/core/pkg/ingress/annotations/ratelimit/main.go +++ b/core/pkg/ingress/annotations/ratelimit/main.go @@ -18,15 +18,24 @@ package ratelimit import ( "fmt" + "sort" + "strings" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/ingress/core/pkg/base64" "k8s.io/ingress/core/pkg/ingress/annotations/parser" + "k8s.io/ingress/core/pkg/ingress/resolver" + "k8s.io/ingress/core/pkg/net" ) const ( - limitIP = "ingress.kubernetes.io/limit-connections" - limitRPS = "ingress.kubernetes.io/limit-rps" + limitIP = "ingress.kubernetes.io/limit-connections" + limitRPS = "ingress.kubernetes.io/limit-rps" + limitRPM = "ingress.kubernetes.io/limit-rpm" + limitRATE = "ingress.kubernetes.io/limit-rate" + limitRATEAFTER = "ingress.kubernetes.io/limit-rate-after" + limitWhitelist = "ingress.kubernetes.io/limit-whitelist" // allow 5 times the specified limit as burst defBurst = 5 @@ -45,6 +54,67 @@ type RateLimit struct { Connections Zone `json:"connections"` // RPS indicates a limit with the number of connections per second RPS Zone `json:"rps"` + + RPM Zone `json:"rpm"` + + LimitRate int `json:"limit-rate"` + + 
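+ // LimitRate and LimitRateAfter are expected to map to the NGINX limit_rate and
+ // limit_rate_after directives: the first LimitRateAfter kilobytes of a response
+ // are served unthrottled, then throughput is capped at LimitRate KB/s (the exact
+ // units depend on how the backend template renders these values).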
LimitRateAfter int `json:"limit-rate-after"` + + Name string `json:"name"` + + ID string `json:"id"` + + Whitelist []string `json:"whitelist"` +} + +// Equal tests for equality between two RateLimit types +func (rt1 *RateLimit) Equal(rt2 *RateLimit) bool { + if rt1 == rt2 { + return true + } + if rt1 == nil || rt2 == nil { + return false + } + if !(&rt1.Connections).Equal(&rt2.Connections) { + return false + } + if !(&rt1.RPM).Equal(&rt2.RPM) { + return false + } + if !(&rt1.RPS).Equal(&rt2.RPS) { + return false + } + if rt1.LimitRate != rt2.LimitRate { + return false + } + if rt1.LimitRateAfter != rt2.LimitRateAfter { + return false + } + if rt1.ID != rt2.ID { + return false + } + if rt1.Name != rt2.Name { + return false + } + if len(rt1.Whitelist) != len(rt2.Whitelist) { + return false + } + + for _, r1l := range rt1.Whitelist { + found := false + for _, rl2 := range rt2.Whitelist { + if r1l == rl2 { + found = true + break + } + } + if !found { + return false + } + } + + return true } // Zone returns information about the NGINX rate limit (limit_req_zone) @@ -57,25 +127,70 @@ type Zone struct { SharedSize int `json:"sharedSize"` } +// Equal tests for equality between two Zone types +func (z1 *Zone) Equal(z2 *Zone) bool { + if z1 == z2 { + return true + } + if z1 == nil || z2 == nil { + return false + } + if z1.Name != z2.Name { + return false + } + if z1.Limit != z2.Limit { + return false + } + if z1.Burst != z2.Burst { + return false + } + if z1.SharedSize != z2.SharedSize { + return false + } + + return true +} + type ratelimit struct { + backendResolver resolver.DefaultBackend } // NewParser creates a new ratelimit annotation parser -func NewParser() parser.IngressAnnotation { - return ratelimit{} +func NewParser(br resolver.DefaultBackend) parser.IngressAnnotation { + return ratelimit{br} } // ParseAnnotations parses the annotations contained in the ingress // rule used to rewrite the defined paths func (a ratelimit) Parse(ing *extensions.Ingress) (interface{}, error) { + defBackend := a.backendResolver.GetDefaultBackend() + lr, err := parser.GetIntAnnotation(limitRATE, ing) + if err != nil { + lr = defBackend.LimitRate + } + lra, err := parser.GetIntAnnotation(limitRATEAFTER, ing) + if err != nil { + lra = defBackend.LimitRateAfter + } + rpm, _ := parser.GetIntAnnotation(limitRPM, ing) rps, _ := parser.GetIntAnnotation(limitRPS, ing) conn, _ := parser.GetIntAnnotation(limitIP, ing) - if rps == 0 && conn == 0 { + val, _ := parser.GetStringAnnotation(limitWhitelist, ing) + + cidrs, err := parseCIDRs(val) + if err != nil { + return nil, err + } + + if rpm == 0 && rps == 0 && conn == 0 { return &RateLimit{ - Connections: Zone{}, - RPS: Zone{}, + Connections: Zone{}, + RPS: Zone{}, + RPM: Zone{}, + LimitRate: lr, + LimitRateAfter: lra, }, nil } @@ -94,5 +209,42 @@ func (a ratelimit) Parse(ing *extensions.Ingress) (interface{}, error) { Burst: rps * defBurst, SharedSize: defSharedSize, }, + RPM: Zone{ + Name: fmt.Sprintf("%v_rpm", zoneName), + Limit: rpm, + Burst: rpm * defBurst, + SharedSize: defSharedSize, + }, + LimitRate: lr, + LimitRateAfter: lra, + Name: zoneName, + ID: base64.Encode(zoneName), + Whitelist: cidrs, }, nil } + +func parseCIDRs(s string) ([]string, error) { + if s == "" { + return []string{}, nil + } + + values := strings.Split(s, ",") + + ipnets, ips, err := net.ParseIPNets(values...) 
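+ // net.ParseIPNets is assumed here to return one map keyed by CIDR string and one
+ // keyed by single IP; the keys of both are merged and sorted below so the rendered
+ // whitelist is deterministic, and any malformed entry rejects the whole annotation.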
+ if err != nil { + return nil, err + } + + cidrs := []string{} + for k := range ipnets { + cidrs = append(cidrs, k) + } + + for k := range ips { + cidrs = append(cidrs, k) + } + + sort.Strings(cidrs) + + return cidrs, nil +} diff --git a/core/pkg/ingress/annotations/ratelimit/main_test.go b/core/pkg/ingress/annotations/ratelimit/main_test.go index 4718851de..e7d405c03 100644 --- a/core/pkg/ingress/annotations/ratelimit/main_test.go +++ b/core/pkg/ingress/annotations/ratelimit/main_test.go @@ -19,11 +19,12 @@ package ratelimit import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress/core/pkg/ingress/defaults" ) func buildIngress() *extensions.Ingress { @@ -61,9 +62,19 @@ func buildIngress() *extensions.Ingress { } } +type mockBackend struct { +} + +func (m mockBackend) GetDefaultBackend() defaults.Backend { + return defaults.Backend{ + LimitRateAfter: 0, + LimitRate: 0, + } +} + func TestWithoutAnnotations(t *testing.T) { ing := buildIngress() - _, err := NewParser().Parse(ing) + _, err := NewParser(mockBackend{}).Parse(ing) if err != nil { t.Error("unexpected error with ingress without annotations") } @@ -75,9 +86,10 @@ func TestBadRateLimiting(t *testing.T) { data := map[string]string{} data[limitIP] = "0" data[limitRPS] = "0" + data[limitRPM] = "0" ing.SetAnnotations(data) - _, err := NewParser().Parse(ing) + _, err := NewParser(mockBackend{}).Parse(ing) if err != nil { t.Errorf("unexpected error with invalid limits (0)") } @@ -85,9 +97,13 @@ func TestBadRateLimiting(t *testing.T) { data = map[string]string{} data[limitIP] = "5" data[limitRPS] = "100" + data[limitRPM] = "10" + data[limitRATEAFTER] = "100" + data[limitRATE] = "10" + ing.SetAnnotations(data) - i, err := NewParser().Parse(ing) + i, err := NewParser(mockBackend{}).Parse(ing) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -101,4 +117,13 @@ func TestBadRateLimiting(t *testing.T) { if rateLimit.RPS.Limit != 100 { t.Errorf("expected 100 in limit by rps but %v was returned", rateLimit.RPS) } + if rateLimit.RPM.Limit != 10 { + t.Errorf("expected 10 in limit by rpm but %v was returned", rateLimit.RPM) + } + if rateLimit.LimitRateAfter != 100 { + t.Errorf("expected 100 in limit by limitrateafter but %v was returned", rateLimit.LimitRateAfter) + } + if rateLimit.LimitRate != 10 { + t.Errorf("expected 10 in limit by limitrate but %v was returned", rateLimit.LimitRate) + } } diff --git a/core/pkg/ingress/annotations/redirect/redirect.go b/core/pkg/ingress/annotations/redirect/redirect.go new file mode 100644 index 000000000..b5e366580 --- /dev/null +++ b/core/pkg/ingress/annotations/redirect/redirect.go @@ -0,0 +1,131 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package redirect + +import ( + "net/http" + "net/url" + "strings" + + extensions "k8s.io/api/extensions/v1beta1" + + "k8s.io/ingress/core/pkg/ingress/annotations/parser" + "k8s.io/ingress/core/pkg/ingress/errors" +) + +const ( + permanent = "ingress.kubernetes.io/permanent-redirect" + temporal = "ingress.kubernetes.io/temporal-redirect" + www = "ingress.kubernetes.io/from-to-www-redirect" +) + +// Redirect describes the redirect configuration for an Ingress rule +type Redirect struct { + URL string `json:"url"` + Code int `json:"code"` + FromToWWW bool `json:"fromToWWW"` +} + +type redirect struct{} + +// NewParser creates a new redirect annotation parser +func NewParser() parser.IngressAnnotation { + return redirect{} +} + +// Parse parses the annotations contained in the ingress +// rule used to create a redirect in the paths defined in the rule. +// If the Ingress contains both annotations, the execution order is +// temporal and then permanent +func (a redirect) Parse(ing *extensions.Ingress) (interface{}, error) { + r3w, _ := parser.GetBoolAnnotation(www, ing) + + tr, err := parser.GetStringAnnotation(temporal, ing) + if err != nil && !errors.IsMissingAnnotations(err) { + return nil, err + } + + if tr != "" { + if err := isValidURL(tr); err != nil { + return nil, err + } + + return &Redirect{ + URL: tr, + Code: http.StatusFound, + FromToWWW: r3w, + }, nil + } + + pr, err := parser.GetStringAnnotation(permanent, ing) + if err != nil && !errors.IsMissingAnnotations(err) { + return nil, err + } + + if pr != "" { + if err := isValidURL(pr); err != nil { + return nil, err + } + + return &Redirect{ + URL: pr, + Code: http.StatusMovedPermanently, + FromToWWW: r3w, + }, nil + } + + if r3w { + return &Redirect{ + FromToWWW: r3w, + }, nil + } + + return nil, errors.ErrMissingAnnotations +} + +// Equal tests for equality between two Redirect types +func (r1 *Redirect) Equal(r2 *Redirect) bool { + if r1 == r2 { + return true + } + if r1 == nil || r2 == nil { + return false + } + if r1.URL != r2.URL { + return false + } + if r1.Code != r2.Code { + return false + } + if r1.FromToWWW != r2.FromToWWW { + return false + } + return true +} + +func isValidURL(s string) error { + u, err := url.Parse(s) + if err != nil { + return err + } + + if !strings.HasPrefix(u.Scheme, "http") { + return errors.Errorf("only http and https are valid protocols (%v)", u.Scheme) + } + + return nil +} diff --git a/core/pkg/ingress/annotations/rewrite/main.go b/core/pkg/ingress/annotations/rewrite/main.go index 771fc80a7..32cc421ae 100644 --- a/core/pkg/ingress/annotations/rewrite/main.go +++ b/core/pkg/ingress/annotations/rewrite/main.go @@ -17,7 +17,7 @@ limitations under the License.
package rewrite import ( - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" @@ -26,6 +26,7 @@ import ( const ( rewriteTo = "ingress.kubernetes.io/rewrite-target" addBaseURL = "ingress.kubernetes.io/add-base-url" + baseURLScheme = "ingress.kubernetes.io/base-url-scheme" sslRedirect = "ingress.kubernetes.io/ssl-redirect" forceSSLRedirect = "ingress.kubernetes.io/force-ssl-redirect" appRoot = "ingress.kubernetes.io/app-root" @@ -38,6 +39,8 @@ type Redirect struct { // AddBaseURL indicates if is required to add a base tag in the head // of the responses from the upstream servers AddBaseURL bool `json:"addBaseUrl"` + // BaseURLScheme overrides the scheme passed to the base tag + BaseURLScheme string `json:"baseUrlScheme"` // SSLRedirect indicates if the location section is accessible SSL only SSLRedirect bool `json:"sslRedirect"` // ForceSSLRedirect indicates if the location section is accessible SSL only @@ -46,6 +49,36 @@ type Redirect struct { AppRoot string `json:"appRoot"` } +// Equal tests for equality between two Redirect types +func (r1 *Redirect) Equal(r2 *Redirect) bool { + if r1 == r2 { + return true + } + if r1 == nil || r2 == nil { + return false + } + if r1.Target != r2.Target { + return false + } + if r1.AddBaseURL != r2.AddBaseURL { + return false + } + if r1.BaseURLScheme != r2.BaseURLScheme { + return false + } + if r1.SSLRedirect != r2.SSLRedirect { + return false + } + if r1.ForceSSLRedirect != r2.ForceSSLRedirect { + return false + } + if r1.AppRoot != r2.AppRoot { + return false + } + + return true +} + type rewrite struct { backendResolver resolver.DefaultBackend } @@ -68,10 +101,12 @@ func (a rewrite) Parse(ing *extensions.Ingress) (interface{}, error) { fSslRe = a.backendResolver.GetDefaultBackend().ForceSSLRedirect } abu, _ := parser.GetBoolAnnotation(addBaseURL, ing) + bus, _ := parser.GetStringAnnotation(baseURLScheme, ing) ar, _ := parser.GetStringAnnotation(appRoot, ing) return &Redirect{ Target: rt, AddBaseURL: abu, + BaseURLScheme: bus, SSLRedirect: sslRe, ForceSSLRedirect: fSslRe, AppRoot: ar, diff --git a/core/pkg/ingress/annotations/rewrite/main_test.go b/core/pkg/ingress/annotations/rewrite/main_test.go index 38f224d9d..6529857f9 100644 --- a/core/pkg/ingress/annotations/rewrite/main_test.go +++ b/core/pkg/ingress/annotations/rewrite/main_test.go @@ -19,10 +19,10 @@ package rewrite import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" ) diff --git a/core/pkg/ingress/annotations/secureupstream/main.go b/core/pkg/ingress/annotations/secureupstream/main.go index d07f92994..33c979819 100644 --- a/core/pkg/ingress/annotations/secureupstream/main.go +++ b/core/pkg/ingress/annotations/secureupstream/main.go @@ -18,8 +18,9 @@ package secureupstream import ( "fmt" + "github.com/pkg/errors" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/resolver" @@ -32,8 +33,8 @@ const ( // Secure describes SSL backend configuration type Secure struct { - Secure bool - CACert resolver.AuthSSLCert + Secure bool `json:"secure"` + CACert
resolver.AuthSSLCert `json:"caCert"` } type su struct { diff --git a/core/pkg/ingress/annotations/secureupstream/main_test.go b/core/pkg/ingress/annotations/secureupstream/main_test.go index 3eea12dd7..961721b4f 100644 --- a/core/pkg/ingress/annotations/secureupstream/main_test.go +++ b/core/pkg/ingress/annotations/secureupstream/main_test.go @@ -19,11 +19,12 @@ package secureupstream import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "fmt" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress/core/pkg/ingress/resolver" ) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview_expansion.go b/core/pkg/ingress/annotations/serviceupstream/main.go similarity index 53% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview_expansion.go rename to core/pkg/ingress/annotations/serviceupstream/main.go index 012749ec3..a92d4a67c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview_expansion.go +++ b/core/pkg/ingress/annotations/serviceupstream/main.go @@ -14,22 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package serviceupstream import ( - authenticationapi "k8s.io/kubernetes/pkg/apis/authentication/v1" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) -type TokenReviewExpansion interface { - Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +const ( + annotationServiceUpstream = "ingress.kubernetes.io/service-upstream" +) + +type serviceUpstream struct { } -func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - result = &authenticationapi.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - Body(tokenReview). - Do(). - Into(result) - return +// NewParser creates a new serviceUpstream annotation parser +func NewParser() parser.IngressAnnotation { + return serviceUpstream{} +} + +func (s serviceUpstream) Parse(ing *extensions.Ingress) (interface{}, error) { + return parser.GetBoolAnnotation(annotationServiceUpstream, ing) } diff --git a/core/pkg/ingress/annotations/serviceupstream/main_test.go b/core/pkg/ingress/annotations/serviceupstream/main_test.go new file mode 100644 index 000000000..9dcfdece6 --- /dev/null +++ b/core/pkg/ingress/annotations/serviceupstream/main_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serviceupstream + +import ( + "testing" + + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func buildIngress() *extensions.Ingress { + defaultBackend := extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []extensions.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestIngressAnnotationServiceUpstreamEnabled(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[annotationServiceUpstream] = "true" + ing.SetAnnotations(data) + + val, _ := NewParser().Parse(ing) + enabled, ok := val.(bool) + if !ok { + t.Errorf("expected a bool type") + } + + if !enabled { + t.Errorf("expected annotation value to be true, got false") + } +} + +func TestIngressAnnotationServiceUpstreamSetFalse(t *testing.T) { + ing := buildIngress() + + // Test with explicitly set to false + data := map[string]string{} + data[annotationServiceUpstream] = "false" + ing.SetAnnotations(data) + + val, _ := NewParser().Parse(ing) + enabled, ok := val.(bool) + if !ok { + t.Errorf("expected a bool type") + } + + if enabled { + t.Errorf("expected annotation value to be false, got true") + } + + // Test with no annotation specified, should default to false + data = map[string]string{} + ing.SetAnnotations(data) + + val, _ = NewParser().Parse(ing) + enabled, ok = val.(bool) + if !ok { + t.Errorf("expected a bool type") + } + + if enabled { + t.Errorf("expected annotation value to be false, got true") + } +} diff --git a/core/pkg/ingress/annotations/sessionaffinity/main.go b/core/pkg/ingress/annotations/sessionaffinity/main.go index d6760f156..032af3b77 100644 --- a/core/pkg/ingress/annotations/sessionaffinity/main.go +++ b/core/pkg/ingress/annotations/sessionaffinity/main.go @@ -21,7 +21,7 @@ import ( "github.com/golang/glog" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) @@ -92,10 +92,7 @@ type affinity struct { // ParseAnnotations parses the annotations contained in the ingress // rule used to configure the affinity directives func (a affinity) Parse(ing *extensions.Ingress) (interface{}, error) { - - var cookieAffinityConfig *CookieConfig - cookieAffinityConfig = &CookieConfig{} - + cookieAffinityConfig := &CookieConfig{} // Check the type of affinity that will be used at, err := parser.GetStringAnnotation(annotationAffinityType, ing) if err != nil { diff --git a/core/pkg/ingress/annotations/sessionaffinity/main_test.go b/core/pkg/ingress/annotations/sessionaffinity/main_test.go index 4a44389b1..5008c6b1e 100644 --- a/core/pkg/ingress/annotations/sessionaffinity/main_test.go +++ b/core/pkg/ingress/annotations/sessionaffinity/main_test.go @@ -19,10 +19,10 @@ package sessionaffinity import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func buildIngress() *extensions.Ingress { diff --git a/core/pkg/ingress/annotations/snippet/main.go b/core/pkg/ingress/annotations/snippet/main.go index 8a6970d1b..3c1e5a212 100644 --- a/core/pkg/ingress/annotations/snippet/main.go +++ b/core/pkg/ingress/annotations/snippet/main.go @@ -17,7 +17,7 @@ limitations under the License. package snippet import ( - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ) diff --git a/core/pkg/ingress/annotations/snippet/main_test.go b/core/pkg/ingress/annotations/snippet/main_test.go index 450a21487..b92abfc45 100644 --- a/core/pkg/ingress/annotations/snippet/main_test.go +++ b/core/pkg/ingress/annotations/snippet/main_test.go @@ -19,9 +19,9 @@ package snippet import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" ) func TestParse(t *testing.T) { diff --git a/core/pkg/ingress/annotations/sslpassthrough/main.go b/core/pkg/ingress/annotations/sslpassthrough/main.go index e6895c068..d08634e02 100644 --- a/core/pkg/ingress/annotations/sslpassthrough/main.go +++ b/core/pkg/ingress/annotations/sslpassthrough/main.go @@ -17,7 +17,7 @@ limitations under the License. package sslpassthrough import ( - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/annotations/parser" ing_errors "k8s.io/ingress/core/pkg/ingress/errors" diff --git a/core/pkg/ingress/annotations/sslpassthrough/main_test.go b/core/pkg/ingress/annotations/sslpassthrough/main_test.go index fc4c990e5..bf3e083d8 100644 --- a/core/pkg/ingress/annotations/sslpassthrough/main_test.go +++ b/core/pkg/ingress/annotations/sslpassthrough/main_test.go @@ -19,9 +19,9 @@ package sslpassthrough import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/core/pkg/ingress/annotations/upstreamvhost/main.go b/core/pkg/ingress/annotations/upstreamvhost/main.go new file mode 100644 index 000000000..1603905d0 --- /dev/null +++ b/core/pkg/ingress/annotations/upstreamvhost/main.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package upstreamvhost + +import ( + extensions "k8s.io/api/extensions/v1beta1" + + "k8s.io/ingress/core/pkg/ingress/annotations/parser" +) + +const ( + annotation = "ingress.kubernetes.io/upstream-vhost" +) + +type upstreamVhost struct { +} + +// NewParser creates a new upstream VHost annotation parser +func NewParser() parser.IngressAnnotation { + return upstreamVhost{} +} + +// Parse parses the annotation used to override the hostname (vhost) +// sent to the upstream server for the paths defined in the rule +func (a upstreamVhost) Parse(ing *extensions.Ingress) (interface{}, error) { + return parser.GetStringAnnotation(annotation, ing) +} diff --git a/core/pkg/ingress/controller/annotations.go b/core/pkg/ingress/controller/annotations.go index 2e1c0fc52..8e9f4c8e3 100644 --- a/core/pkg/ingress/controller/annotations.go +++ b/core/pkg/ingress/controller/annotations.go @@ -18,23 +18,29 @@ package controller import ( "github.com/golang/glog" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/ingress/core/pkg/ingress/annotations/alias" "k8s.io/ingress/core/pkg/ingress/annotations/auth" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" "k8s.io/ingress/core/pkg/ingress/annotations/authtls" + "k8s.io/ingress/core/pkg/ingress/annotations/clientbodybuffersize" "k8s.io/ingress/core/pkg/ingress/annotations/cors" + "k8s.io/ingress/core/pkg/ingress/annotations/defaultbackend" "k8s.io/ingress/core/pkg/ingress/annotations/healthcheck" "k8s.io/ingress/core/pkg/ingress/annotations/ipwhitelist" "k8s.io/ingress/core/pkg/ingress/annotations/parser" "k8s.io/ingress/core/pkg/ingress/annotations/portinredirect" "k8s.io/ingress/core/pkg/ingress/annotations/proxy" "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" + "k8s.io/ingress/core/pkg/ingress/annotations/redirect" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" "k8s.io/ingress/core/pkg/ingress/annotations/secureupstream" "k8s.io/ingress/core/pkg/ingress/annotations/serversnippet" + "k8s.io/ingress/core/pkg/ingress/annotations/serviceupstream" "k8s.io/ingress/core/pkg/ingress/annotations/sessionaffinity" "k8s.io/ingress/core/pkg/ingress/annotations/snippet" "k8s.io/ingress/core/pkg/ingress/annotations/sslpassthrough" + "k8s.io/ingress/core/pkg/ingress/annotations/upstreamvhost" "k8s.io/ingress/core/pkg/ingress/errors" "k8s.io/ingress/core/pkg/ingress/resolver" ) @@ -43,6 +49,7 @@ type extractorConfig interface { resolver.AuthCertificate resolver.DefaultBackend resolver.Secret + resolver.Service } type annotationExtractor struct { @@ -62,19 +69,25 @@ func newAnnotationExtractor(cfg extractorConfig) annotationExtractor { "Whitelist": ipwhitelist.NewParser(cfg), "UsePortInRedirects": portinredirect.NewParser(cfg), "Proxy": proxy.NewParser(cfg), - "RateLimit": ratelimit.NewParser(), - "Redirect": rewrite.NewParser(cfg), + "RateLimit": ratelimit.NewParser(cfg), + "Redirect": redirect.NewParser(), + "Rewrite": rewrite.NewParser(cfg), "SecureUpstream": secureupstream.NewParser(cfg), + "ServiceUpstream": serviceupstream.NewParser(), "SessionAffinity": sessionaffinity.NewParser(), "SSLPassthrough": sslpassthrough.NewParser(), "ConfigurationSnippet": snippet.NewParser(), "ServerSnippet": serversnippet.NewParser(), + "Alias": alias.NewParser(), + "ClientBodyBufferSize": clientbodybuffersize.NewParser(), + "DefaultBackend": defaultbackend.NewParser(cfg), + "UpstreamVhost": upstreamvhost.NewParser(), }, } } func (e
*annotationExtractor) Extract(ing *extensions.Ingress) map[string]interface{} { - anns := make(map[string]interface{}, 0) + anns := make(map[string]interface{}) for name, annotationParser := range e.annotations { val, err := annotationParser.Parse(ing) glog.V(5).Infof("annotation %v in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), val) @@ -83,6 +96,10 @@ func (e *annotationExtractor) Extract(ing *extensions.Ingress) map[string]interf continue } + if !errors.IsLocationDenied(err) { + continue + } + _, alreadyDenied := anns[DeniedKeyName] if !alreadyDenied { anns[DeniedKeyName] = err @@ -102,13 +119,22 @@ func (e *annotationExtractor) Extract(ing *extensions.Ingress) map[string]interf } const ( - secureUpstream = "SecureUpstream" - healthCheck = "HealthCheck" - sslPassthrough = "SSLPassthrough" - sessionAffinity = "SessionAffinity" - serverSnippet = "ServerSnippet" + secureUpstream = "SecureUpstream" + healthCheck = "HealthCheck" + sslPassthrough = "SSLPassthrough" + sessionAffinity = "SessionAffinity" + serviceUpstream = "ServiceUpstream" + serverAlias = "Alias" + clientBodyBufferSize = "ClientBodyBufferSize" + certificateAuth = "CertificateAuth" + serverSnippet = "ServerSnippet" ) +func (e *annotationExtractor) ServiceUpstream(ing *extensions.Ingress) bool { + val, _ := e.annotations[serviceUpstream].Parse(ing) + return val.(bool) +} + func (e *annotationExtractor) SecureUpstream(ing *extensions.Ingress) *secureupstream.Secure { val, err := e.annotations[secureUpstream].Parse(ing) if err != nil { @@ -128,6 +154,16 @@ func (e *annotationExtractor) SSLPassthrough(ing *extensions.Ingress) bool { return val.(bool) } +func (e *annotationExtractor) Alias(ing *extensions.Ingress) string { + val, _ := e.annotations[serverAlias].Parse(ing) + return val.(string) +} + +func (e *annotationExtractor) ClientBodyBufferSize(ing *extensions.Ingress) string { + val, _ := e.annotations[clientBodyBufferSize].Parse(ing) + return val.(string) +} + func (e *annotationExtractor) SessionAffinity(ing *extensions.Ingress) *sessionaffinity.AffinityConfig { val, _ := e.annotations[sessionAffinity].Parse(ing) return val.(*sessionaffinity.AffinityConfig) @@ -140,3 +176,16 @@ func (e *annotationExtractor) ServerSnippet(ing *extensions.Ingress) string { } return val.(string) } + +func (e *annotationExtractor) CertificateAuth(ing *extensions.Ingress) *authtls.AuthSSLConfig { + val, err := e.annotations[certificateAuth].Parse(ing) + if errors.IsMissingAnnotations(err) { + return nil + } + + if err != nil { + glog.Errorf("error parsing certificate auth: %v", err) + } + secure := val.(*authtls.AuthSSLConfig) + return secure +} diff --git a/core/pkg/ingress/controller/annotations_test.go b/core/pkg/ingress/controller/annotations_test.go index 0da4458ab..03e211db3 100644 --- a/core/pkg/ingress/controller/annotations_test.go +++ b/core/pkg/ingress/controller/annotations_test.go @@ -19,10 +19,10 @@ package controller import ( "testing" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/ingress/core/pkg/ingress/defaults" "k8s.io/ingress/core/pkg/ingress/resolver" @@ -37,11 +37,11 @@ const ( annotationAffinityType = "ingress.kubernetes.io/affinity" annotationAffinityCookieName = "ingress.kubernetes.io/session-cookie-name" annotationAffinityCookieHash = "ingress.kubernetes.io/session-cookie-hash" - 
annotationAuthTlsSecret = "ingress.kubernetes.io/auth-tls-secret" ) type mockCfg struct { - MockSecrets map[string]*api.Secret + MockSecrets map[string]*api.Secret + MockServices map[string]*api.Service } func (m mockCfg) GetDefaultBackend() defaults.Backend { @@ -52,6 +52,10 @@ func (m mockCfg) GetSecret(name string) (*api.Secret, error) { return m.MockSecrets[name], nil } +func (m mockCfg) GetService(name string) (*api.Service, error) { + return m.MockServices[name], nil +} + func (m mockCfg) GetAuthCertificate(name string) (*resolver.AuthSSLCert, error) { if secret, _ := m.GetSecret(name); secret != nil { return &resolver.AuthSSLCert{ diff --git a/core/pkg/ingress/controller/backend_ssl.go b/core/pkg/ingress/controller/backend_ssl.go index 8ce21bb9b..e9ae161e9 100644 --- a/core/pkg/ingress/controller/backend_ssl.go +++ b/core/pkg/ingress/controller/backend_ssl.go @@ -23,7 +23,7 @@ import ( "github.com/golang/glog" - api "k8s.io/client-go/pkg/api/v1" + api "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/ingress/core/pkg/ingress" @@ -33,36 +33,35 @@ import ( // syncSecret keeps in sync Secrets used by Ingress rules with the files on // disk to allow copy of the content of the secret to disk to be used // by external processes. -func (ic *GenericController) syncSecret() { - glog.V(3).Infof("starting syncing of secrets") +func (ic *GenericController) syncSecret(key string) { + glog.V(3).Infof("starting syncing of secret %v", key) - var cert *ingress.SSLCert - var err error - - for _, k := range ic.secretTracker.List() { - key := k.(string) - cert, err = ic.getPemCertificate(key) - if err != nil { - glog.Warningf("error obtaining PEM from secret %v: %v", key, err) - continue - } - - // create certificates and add or update the item in the store - cur, exists := ic.sslCertTracker.Get(key) - if exists { - s := cur.(*ingress.SSLCert) - if reflect.DeepEqual(s, cert) { - // no need to update - continue - } - glog.Infof("updating secret %v in the local store", key) - ic.sslCertTracker.Update(key, cert) - continue - } - - glog.Infof("adding secret %v to the local store", key) - ic.sslCertTracker.Add(key, cert) + cert, err := ic.getPemCertificate(key) + if err != nil { + glog.Warningf("error obtaining PEM from secret %v: %v", key, err) + return } + + // create certificates and add or update the item in the store + cur, exists := ic.sslCertTracker.Get(key) + if exists { + s := cur.(*ingress.SSLCert) + if reflect.DeepEqual(s, cert) { + // no need to update + return + } + glog.Infof("updating secret %v in the local store", key) + ic.sslCertTracker.Update(key, cert) + // we need to force the sync of the secret to disk + ic.syncSecret(key) + // this update must trigger an update + // (like an update event from a change in Ingress) + ic.syncIngress("secret-update") + return + } + + glog.Infof("adding secret %v to the local store", key) + ic.sslCertTracker.Add(key, cert) } // getPemCertificate receives a secret, and creates a ingress.SSLCert as return. 
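As an aside, the per-key syncSecret flow above is a fetch-compare-store loop over a client-go ThreadSafeStore. The following standalone Go sketch mirrors that pattern under simplified assumptions (fetchPem is a hypothetical stand-in for getPemCertificate; the real code also re-syncs the secret to disk and triggers syncIngress on updates):

package main

import (
	"fmt"
	"reflect"

	"k8s.io/client-go/tools/cache"
)

// fetchPem stands in for getPemCertificate: it resolves a "namespace/name"
// secret key to the material that should live in the local store.
func fetchPem(key string) (interface{}, error) {
	return "pem-for-" + key, nil
}

// syncOne mirrors syncSecret: fetch the current material, compare it with the
// cached copy, and only write to the store (and reload) when it changed.
func syncOne(store cache.ThreadSafeStore, key string) {
	cert, err := fetchPem(key)
	if err != nil {
		return // the controller logs a warning and waits for the next event
	}
	if cur, exists := store.Get(key); exists {
		if reflect.DeepEqual(cur, cert) {
			return // unchanged: skip the update and the backend reload
		}
		store.Update(key, cert)
		return
	}
	store.Add(key, cert)
}

func main() {
	store := cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{})
	syncOne(store, "default/foo_secret")
	syncOne(store, "default/foo_secret") // second call is a no-op
	fmt.Println("synced")
}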
@@ -86,11 +85,17 @@ func (ic *GenericController) getPemCertificate(secretName string) (*ingress.SSLC var s *ingress.SSLCert if okcert && okkey { - glog.V(3).Infof("found certificate and private key, configuring %v as a TLS Secret", secretName) s, err = ssl.AddOrUpdateCertAndKey(nsSecName, cert, key, ca) + if err != nil { + return nil, fmt.Errorf("unexpected error creating pem file %v", err) + } + glog.V(3).Infof("found certificate and private key, configuring %v as a TLS Secret (CN: %v)", secretName, s.CN) } else if ca != nil { glog.V(3).Infof("found only ca.crt, configuring %v as an Certificate Authentication secret", secretName) s, err = ssl.AddCertAuth(nsSecName, ca) + if err != nil { + return nil, fmt.Errorf("unexpected error creating pem file %v", err) + } } else { return nil, fmt.Errorf("no keypair or CA cert could be found in %v", secretName) } @@ -115,13 +120,6 @@ func newSSLCertTracker() *sslCertTracker { } } -// secretTracker holds a store of Secrets -type secretTracker struct { - cache.ThreadSafeStore -} - -func newSecretTracker() *secretTracker { - return &secretTracker{ - cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{}), - } +func (s *sslCertTracker) DeleteAll(key string) { + s.Delete(key) } diff --git a/core/pkg/ingress/controller/backend_ssl_test.go b/core/pkg/ingress/controller/backend_ssl_test.go index e7fb991d5..a8109bff8 100644 --- a/core/pkg/ingress/controller/backend_ssl_test.go +++ b/core/pkg/ingress/controller/backend_ssl_test.go @@ -23,9 +23,9 @@ import ( "fmt" + api_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" testclient "k8s.io/client-go/kubernetes/fake" - api_v1 "k8s.io/client-go/pkg/api/v1" cache_client "k8s.io/client-go/tools/cache" "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/store" @@ -110,7 +110,6 @@ func buildGenericControllerForBackendSSL() *GenericController { mapController: buildControllerForBackendSSL(), sslCertTracker: newSSLCertTracker(), - secretTracker: newSecretTracker(), } } @@ -157,7 +156,6 @@ func TestSyncSecret(t *testing.T) { for _, foo := range foos { t.Run(foo.tn, func(t *testing.T) { ic := buildGenericControllerForBackendSSL() - ic.secretTracker.Add(foo.secretName, foo.secretName) // init secret for getPemCertificate secret := buildSecretForBackendSSL() @@ -166,16 +164,17 @@ func TestSyncSecret(t *testing.T) { secret.Data = foo.Data ic.secrLister.Add(secret) + key := "default/foo_secret" // for add - ic.syncSecret() + ic.syncSecret(key) if foo.expectSuccess { // validate - _, exist := ic.sslCertTracker.Get(foo.secretName) + _, exist := ic.sslCertTracker.Get(key) if !exist { t.Errorf("Failed to sync secret: %s", foo.secretName) } else { // for update - ic.syncSecret() + ic.syncSecret(key) } } }) diff --git a/core/pkg/ingress/controller/controller.go b/core/pkg/ingress/controller/controller.go index ac554f1cd..181208de0 100644 --- a/core/pkg/ingress/controller/controller.go +++ b/core/pkg/ingress/controller/controller.go @@ -18,7 +18,7 @@ package controller import ( "fmt" - "os" + "math/rand" "reflect" "sort" "strconv" @@ -28,19 +28,20 @@ import ( "github.com/golang/glog" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" unversionedcore 
"k8s.io/client-go/kubernetes/typed/core/v1" - api "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/client-go/tools/cache" + fcache "k8s.io/client-go/tools/cache/testing" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" - "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/annotations/class" "k8s.io/ingress/core/pkg/ingress/annotations/healthcheck" @@ -49,7 +50,6 @@ import ( "k8s.io/ingress/core/pkg/ingress/defaults" "k8s.io/ingress/core/pkg/ingress/resolver" "k8s.io/ingress/core/pkg/ingress/status" - "k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock" "k8s.io/ingress/core/pkg/ingress/store" "k8s.io/ingress/core/pkg/k8s" "k8s.io/ingress/core/pkg/net/ssl" @@ -58,15 +58,21 @@ import ( ) const ( - defUpstreamName = "upstream-default-backend" - defServerName = "_" - podStoreSyncedPollPeriod = 1 * time.Second - rootLocation = "/" + defUpstreamName = "upstream-default-backend" + defServerName = "_" + rootLocation = "/" + + fakeCertificate = "default-fake-certificate" ) var ( // list of ports that cannot be used by TCP or UDP services reservedPorts = []string{"80", "443", "8181", "18080"} + + fakeCertificatePath = "" + fakeCertificateSHA = "" + + cloner = conversion.NewCloner() ) // GenericController holds the boilerplate code required to build an Ingress controlller. @@ -98,8 +104,6 @@ type GenericController struct { // local store of SSL certificates // (only certificates used in ingress) sslCertTracker *sslCertTracker - // store of secret names referenced from Ingress - secretTracker *secretTracker syncRateLimiter flowcontrol.RateLimiter @@ -109,6 +113,11 @@ type GenericController struct { stopLock *sync.Mutex stopCh chan struct{} + + // runningConfig contains the running configuration in the Backend + runningConfig *ingress.Configuration + + forceReload bool } // Configuration contains all the settings required by an Ingress controller @@ -122,6 +131,7 @@ type Configuration struct { ConfigMapName string ForceNamespaceIsolation bool + DisableNodeList bool // optional TCPConfigMapName string @@ -136,8 +146,10 @@ type Configuration struct { // (for instance NGINX) Backend ingress.Controller - UpdateStatus bool - ElectionID string + UpdateStatus bool + ElectionID string + UpdateStatusOnShutdown bool + SortBackends bool } // newIngressController creates an Ingress controller @@ -146,7 +158,7 @@ func newIngressController(config *Configuration) *GenericController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{ - Interface: config.Client.Core().Events(config.Namespace), + Interface: config.Client.CoreV1().Events(config.Namespace), }) ic := GenericController{ @@ -158,7 +170,6 @@ func newIngressController(config *Configuration) *GenericController { Component: "ingress-controller", }), sslCertTracker: newSSLCertTracker(), - secretTracker: newSecretTracker(), } ic.syncQueue = task.NewTaskQueue(ic.syncIngress) @@ -176,10 +187,22 @@ func newIngressController(config *Configuration) *GenericController { } ic.recorder.Eventf(addIng, api.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", addIng.Namespace, addIng.Name)) ic.syncQueue.Enqueue(obj) - ic.extractSecretNames(addIng) }, DeleteFunc: func(obj interface{}) { - delIng := obj.(*extensions.Ingress) + delIng, ok := obj.(*extensions.Ingress) + if !ok { + // If we reached here it means the ingress was deleted but its final state is unrecorded. 
+ tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + glog.Errorf("couldn't get object from tombstone %#v", obj) + return + } + delIng, ok = tombstone.Obj.(*extensions.Ingress) + if !ok { + glog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj) + return + } + } if !class.IsValid(delIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { glog.Infof("ignoring delete for ingress %v based on annotation %v", delIng.Name, class.IngressKey) return @@ -200,49 +223,51 @@ func newIngressController(config *Configuration) *GenericController { ic.recorder.Eventf(curIng, api.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) } else if validCur && !reflect.DeepEqual(old, cur) { ic.recorder.Eventf(curIng, api.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) - } else { - // old and cur are invalid or old and cur doesn't have changes, so ignore - return } + ic.syncQueue.Enqueue(cur) - ic.extractSecretNames(curIng) }, } secrEventHandler := cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, cur interface{}) { if !reflect.DeepEqual(old, cur) { - ic.syncSecret() + sec := cur.(*api.Secret) + key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name) + ic.syncSecret(key) } }, DeleteFunc: func(obj interface{}) { - sec := obj.(*api.Secret) + sec, ok := obj.(*api.Secret) + if !ok { + // If we reached here it means the secret was deleted but its final state is unrecorded. + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + glog.Errorf("couldn't get object from tombstone %#v", obj) + return + } + sec, ok = tombstone.Obj.(*api.Secret) + if !ok { + glog.Errorf("Tombstone contained object that is not a Secret: %#v", obj) + return + } + } key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name) - ic.sslCertTracker.Delete(key) - ic.secretTracker.Delete(key) + ic.sslCertTracker.DeleteAll(key) }, } eventHandler := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - ep := obj.(*api.Endpoints) - _, found := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey] - if found { - return - } ic.syncQueue.Enqueue(obj) }, DeleteFunc: func(obj interface{}) { ic.syncQueue.Enqueue(obj) }, UpdateFunc: func(old, cur interface{}) { - if !reflect.DeepEqual(old, cur) { - ep := cur.(*api.Endpoints) - _, found := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey] - if found { - return - } - + oep := old.(*api.Endpoints) + ocur := cur.(*api.Endpoints) + if !reflect.DeepEqual(ocur.Subsets, oep.Subsets) { ic.syncQueue.Enqueue(cur) } }, @@ -255,6 +280,7 @@ func newIngressController(config *Configuration) *GenericController { if mapKey == ic.cfg.ConfigMapName { glog.V(2).Infof("adding configmap %v to backend", mapKey) ic.cfg.Backend.SetConfig(upCmap) + ic.forceReload = true } }, UpdateFunc: func(old, cur interface{}) { @@ -264,6 +290,7 @@ func newIngressController(config *Configuration) *GenericController { if mapKey == ic.cfg.ConfigMapName { glog.V(2).Infof("updating configmap backend (%v)", mapKey) ic.cfg.Backend.SetConfig(upCmap) + ic.forceReload = true } // updates to configuration configmaps can trigger an update if mapKey == ic.cfg.ConfigMapName || mapKey == ic.cfg.TCPConfigMapName || mapKey == ic.cfg.UDPConfigMapName { @@ -280,42 +307,49 @@ func newIngressController(config *Configuration) *GenericController { } ic.ingLister.Store, ic.ingController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Extensions().RESTClient(), "ingresses", ic.cfg.Namespace, 
fields.Everything()), + cache.NewListWatchFromClient(ic.cfg.Client.ExtensionsV1beta1().RESTClient(), "ingresses", ic.cfg.Namespace, fields.Everything()), &extensions.Ingress{}, ic.cfg.ResyncPeriod, ingEventHandler) ic.endpLister.Store, ic.endpController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "endpoints", ic.cfg.Namespace, fields.Everything()), + cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "endpoints", ic.cfg.Namespace, fields.Everything()), &api.Endpoints{}, ic.cfg.ResyncPeriod, eventHandler) ic.secrLister.Store, ic.secrController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "secrets", watchNs, fields.Everything()), + cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "secrets", watchNs, fields.Everything()), &api.Secret{}, ic.cfg.ResyncPeriod, secrEventHandler) ic.mapLister.Store, ic.mapController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "configmaps", watchNs, fields.Everything()), + cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "configmaps", watchNs, fields.Everything()), &api.ConfigMap{}, ic.cfg.ResyncPeriod, mapEventHandler) ic.svcLister.Store, ic.svcController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "services", ic.cfg.Namespace, fields.Everything()), + cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "services", ic.cfg.Namespace, fields.Everything()), &api.Service{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{}) + var nodeListerWatcher cache.ListerWatcher + if config.DisableNodeList { + nodeListerWatcher = fcache.NewFakeControllerSource() + } else { + nodeListerWatcher = cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()) + } ic.nodeLister.Store, ic.nodeController = cache.NewInformer( - cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()), + nodeListerWatcher, &api.Node{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{}) if config.UpdateStatus { ic.syncStatus = status.NewStatusSyncer(status.Config{ - Client: config.Client, - PublishService: ic.cfg.PublishService, - IngressLister: ic.ingLister, - ElectionID: config.ElectionID, - IngressClass: config.IngressClass, - DefaultIngressClass: config.DefaultIngressClass, + Client: config.Client, + PublishService: ic.cfg.PublishService, + IngressLister: ic.ingLister, + ElectionID: config.ElectionID, + IngressClass: config.IngressClass, + DefaultIngressClass: config.DefaultIngressClass, + UpdateStatusOnShutdown: config.UpdateStatusOnShutdown, + CustomIngressStatus: ic.cfg.Backend.UpdateIngressStatus, }) } else { glog.Warning("Update of ingress status is disabled (flag --update-status=false was specified)") } - ic.annotations = newAnnotationExtractor(ic) ic.cfg.Backend.SetListers(ingress.StoreLister{ @@ -327,6 +361,8 @@ func newIngressController(config *Configuration) *GenericController { ConfigMap: ic.mapLister, }) + cloner.RegisterDeepCopyFunc(ingress.GetGeneratedDeepCopyFuncs) + return &ic } @@ -345,6 +381,11 @@ func (ic GenericController) GetDefaultBackend() defaults.Backend { return ic.cfg.Backend.BackendDefaults() } +// GetRecorder returns the event recorder +func (ic GenericController) GetRecorder() record.EventRecorder { + return ic.recorder +} + // GetSecret searches for a secret in the local secrets Store func (ic GenericController) GetSecret(name 
string) (*api.Secret, error) { s, exists, err := ic.secrLister.Store.GetByKey(name) @@ -357,6 +398,18 @@ func (ic GenericController) GetSecret(name string) (*api.Secret, error) { return s.(*api.Secret), nil } +// GetService searches for a service in the local services Store +func (ic GenericController) GetService(name string) (*api.Service, error) { + s, exists, err := ic.svcLister.Store.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, fmt.Errorf("service %v was not found", name) + } + return s.(*api.Service), nil +} + func (ic *GenericController) getConfigMap(ns, name string) (*api.ConfigMap, error) { s, exists, err := ic.mapLister.Store.GetByKey(fmt.Sprintf("%v/%v", ns, name)) if err != nil { @@ -378,6 +431,13 @@ func (ic *GenericController) syncIngress(key interface{}) error { return nil } + if name, ok := key.(string); ok { + if obj, exists, _ := ic.ingLister.GetByKey(name); exists { + ing := obj.(*extensions.Ingress) + ic.readSecrets(ing) + } + } + upstreams, servers := ic.getBackendServers() var passUpstreams []*ingress.SSLPassthroughBackend @@ -401,14 +461,22 @@ func (ic *GenericController) syncIngress(key interface{}) error { } } - err := ic.cfg.Backend.OnUpdate(ingress.Configuration{ + pcfg := ingress.Configuration{ Backends: upstreams, Servers: servers, TCPEndpoints: ic.getStreamServices(ic.cfg.TCPConfigMapName, api.ProtocolTCP), UDPEndpoints: ic.getStreamServices(ic.cfg.UDPConfigMapName, api.ProtocolUDP), PassthroughBackends: passUpstreams, - }) + } + if !ic.forceReload && ic.runningConfig != nil && ic.runningConfig.Equal(&pcfg) { + glog.V(3).Infof("skipping backend reload (no changes detected)") + return nil + } + + glog.Infof("backend reload required") + + err := ic.cfg.Backend.OnUpdate(pcfg) if err != nil { incReloadErrorCount() glog.Errorf("unexpected failure restarting the backend: \n%v", err) @@ -419,6 +487,9 @@ func (ic *GenericController) syncIngress(key interface{}) error { incReloadCount() setSSLExpireTime(servers) + ic.runningConfig = &pcfg + ic.forceReload = false + return nil } @@ -458,13 +529,21 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.P } nsSvcPort := strings.Split(v, ":") - if len(nsSvcPort) != 2 { - glog.Warningf("invalid format (namespace/name:port) '%v'", k) + if len(nsSvcPort) < 2 { + glog.Warningf("invalid format (namespace/name:port:[PROXY]) '%v'", k) continue } nsName := nsSvcPort[0] svcPort := nsSvcPort[1] + useProxyProtocol := false + + // Proxy protocol is possible if the service is TCP + if len(nsSvcPort) == 3 && proto == api.ProtocolTCP { + if strings.ToUpper(nsSvcPort[2]) == "PROXY" { + useProxyProtocol = true + } + } svcNs, svcName, err := k8s.ParseNameNS(nsName) if err != nil { @@ -491,8 +570,10 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.P glog.V(3).Infof("searching service %v/%v endpoints using the name '%v'", svcNs, svcName, svcPort) for _, sp := range svc.Spec.Ports { if sp.Name == svcPort { - endps = ic.getEndpoints(svc, &sp, proto, &healthcheck.Upstream{}) - break
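+ // match on protocol in addition to the port name, so a TCP configmap
+ // entry cannot select the UDP port of the same service (and vice versa)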
+ if sp.Protocol == proto { + endps = ic.getEndpoints(svc, &sp, proto, &healthcheck.Upstream{}) + break + } } } } @@ -516,10 +599,11 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.P svcs = append(svcs, ingress.L4Service{ Port: externalPort, Backend: ingress.L4Backend{ - Name: svcName, - Namespace: svcNs, - Port: intstr.FromString(svcPort), - Protocol: proto, + Name: svcName, + Namespace: svcNs, + Port: intstr.FromString(svcPort), + Protocol: proto, + UseProxyProtocol: useProxyProtocol, }, Endpoints: endps, }) @@ -539,13 +623,13 @@ func (ic *GenericController) getDefaultUpstream() *ingress.Backend { svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey) if err != nil { glog.Warningf("unexpected error searching the default backend %v: %v", ic.cfg.DefaultService, err) - upstream.Endpoints = append(upstream.Endpoints, newDefaultServer()) + upstream.Endpoints = append(upstream.Endpoints, ic.cfg.Backend.DefaultEndpoint()) return upstream } if !svcExists { glog.Warningf("service %v does not exist", svcKey) - upstream.Endpoints = append(upstream.Endpoints, newDefaultServer()) + upstream.Endpoints = append(upstream.Endpoints, ic.cfg.Backend.DefaultEndpoint()) return upstream } @@ -553,9 +637,10 @@ func (ic *GenericController) getDefaultUpstream() *ingress.Backend { endps := ic.getEndpoints(svc, &svc.Spec.Ports[0], api.ProtocolTCP, &healthcheck.Upstream{}) if len(endps) == 0 { glog.Warningf("service %v does not have any active endpoints", svcKey) - endps = []ingress.Endpoint{newDefaultServer()} + endps = []ingress.Endpoint{ic.cfg.Backend.DefaultEndpoint()} } + upstream.Service = svc upstream.Endpoints = append(upstream.Endpoints, endps...) return upstream } @@ -579,9 +664,23 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress upstreams := ic.createUpstreams(ings) servers := ic.createServers(ings, upstreams) + // If a server has a hostname equivalent to a pre-existing alias, then we + // remove the alias to avoid conflicts. + for _, server := range servers { + for j, alias := range servers { + if server.Hostname == alias.Alias { + glog.Warningf("There is a conflict with server hostname '%v' and alias '%v' (in server %v). 
Removing alias to avoid conflicts.", + server.Hostname, alias.Hostname, alias.Hostname) + servers[j].Alias = "" + } + } + } + for _, ingIf := range ings { ing := ingIf.(*extensions.Ingress) + affinity := ic.annotations.SessionAffinity(ing) + if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { continue } @@ -604,6 +703,15 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress continue } + if server.CertificateAuth.CAFileName == "" { + ca := ic.annotations.CertificateAuth(ing) + if ca != nil { + server.CertificateAuth = *ca + } + } else { + glog.V(3).Infof("server %v already contains a mutual authentication configuration - ingress rule %v/%v", server.Hostname, ing.Namespace, ing.Name) + } + for _, path := range rule.HTTP.Paths { upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), @@ -634,7 +742,11 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress loc.Backend = ups.Name loc.Port = ups.Port loc.Service = ups.Service + loc.Ingress = ing mergeLocationAnnotations(loc, anns) + if loc.Redirect.FromToWWW { + server.RedirectFromToWWW = true + } break } } @@ -647,20 +759,68 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress IsDefBackend: false, Service: ups.Service, Port: ups.Port, + Ingress: ing, } mergeLocationAnnotations(loc, anns) + if loc.Redirect.FromToWWW { + server.RedirectFromToWWW = true + } server.Locations = append(server.Locations, loc) } + + if ups.SessionAffinity.AffinityType == "" { + ups.SessionAffinity.AffinityType = affinity.AffinityType + } + + if affinity.AffinityType == "cookie" { + ups.SessionAffinity.CookieSessionAffinity.Name = affinity.CookieConfig.Name + ups.SessionAffinity.CookieSessionAffinity.Hash = affinity.CookieConfig.Hash + + locs := ups.SessionAffinity.CookieSessionAffinity.Locations + if _, ok := locs[host]; !ok { + locs[host] = []string{} + } + + locs[host] = append(locs[host], path.Path) + } } } } - // Configure Backends[].SSLPassthrough + aUpstreams := make([]*ingress.Backend, 0, len(upstreams)) + for _, upstream := range upstreams { isHTTPSfrom := []*ingress.Server{} for _, server := range servers { for _, location := range server.Locations { if upstream.Name == location.Backend { + if len(upstream.Endpoints) == 0 { + glog.V(3).Infof("upstream %v does not have any active endpoints.
Using default backend", upstream.Name) + location.Backend = "upstream-default-backend" + + // check if the location contains endpoints and a custom default backend + if location.DefaultBackend != nil { + sp := location.DefaultBackend.Spec.Ports[0] + endps := ic.getEndpoints(location.DefaultBackend, &sp, api.ProtocolTCP, &healthcheck.Upstream{}) + if len(endps) > 0 { + glog.V(3).Infof("using custom default backend in server %v location %v (service %v/%v)", + server.Hostname, location.Path, location.DefaultBackend.Namespace, location.DefaultBackend.Name) + b, err := cloner.DeepCopy(upstream) + if err != nil { + glog.Errorf("unexpected error copying Upstream: %v", err) + } else { + name := fmt.Sprintf("custom-default-backend-%v", upstream.Name) + nb := b.(*ingress.Backend) + nb.Name = name + nb.Endpoints = endps + aUpstreams = append(aUpstreams, nb) + location.Backend = name + } + } + } + } + + // Configure Backends[].SSLPassthrough if server.SSLPassthrough { if location.Path == rootLocation { if location.Backend == defUpstreamName { @@ -670,28 +830,27 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress isHTTPSfrom = append(isHTTPSfrom, server) } - continue } } } } + if len(isHTTPSfrom) > 0 { upstream.SSLPassthrough = true } } - // TODO: find a way to make this more readable - // The structs must be ordered to always generate the same file - // if the content does not change. - aUpstreams := make([]*ingress.Backend, 0, len(upstreams)) - for _, value := range upstreams { - if len(value.Endpoints) == 0 { - glog.V(3).Infof("upstream %v does not have any active endpoints. Using default backend", value.Name) - value.Endpoints = append(value.Endpoints, newDefaultServer()) + // create the list of upstreams and skip those without endpoints + for _, upstream := range upstreams { + if len(upstream.Endpoints) == 0 { + continue } - aUpstreams = append(aUpstreams, value) + aUpstreams = append(aUpstreams, upstream) + } + + if ic.cfg.SortBackends { + sort.Sort(ingress.BackendByNameServers(aUpstreams)) } - sort.Sort(ingress.BackendByNameServers(aUpstreams)) aServers := make([]*ingress.Server, 0, len(servers)) for _, value := range servers { @@ -705,8 +864,8 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress // GetAuthCertificate ... func (ic GenericController) GetAuthCertificate(secretName string) (*resolver.AuthSSLCert, error) { - if _, exists := ic.secretTracker.Get(secretName); !exists { - ic.secretTracker.Add(secretName, secretName) + if _, exists := ic.sslCertTracker.Get(secretName); !exists { + ic.syncSecret(secretName) } _, err := ic.GetSecret(secretName) @@ -741,7 +900,7 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing secUpstream := ic.annotations.SecureUpstream(ing) hz := ic.annotations.HealthCheck(ing) - affinity := ic.annotations.SessionAffinity(ing) + serviceUpstream := ic.annotations.ServiceUpstream(ing) var defBackend string if ing.Spec.Backend != nil { @@ -752,13 +911,27 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing glog.V(3).Infof("creating upstream %v", defBackend) upstreams[defBackend] = newUpstream(defBackend) - svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName) - endps, err := ic.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String(), hz) - upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...) 
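// A note on the hunk below: when the serviceUpstream annotation is enabled,
// the controller uses the Service's ClusterIP as a single upstream endpoint
// instead of the individual pod endpoints. A minimal sketch of an Ingress
// opting in, assuming the annotation key parsed by
// ic.annotations.ServiceUpstream is "ingress.kubernetes.io/service-upstream"
// (hypothetical name here):
//
//	apiVersion: extensions/v1beta1
//	kind: Ingress
//	metadata:
//	  name: echo
//	  annotations:
//	    ingress.kubernetes.io/service-upstream: "true"
//	spec:
//	  backend:
//	    serviceName: echo
//	    servicePort: 80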
-		if err != nil {
-			glog.Warningf("error creating upstream %v: %v", defBackend, err)
+
+		// Add the service cluster endpoint as the upstream instead of individual endpoints
+		// if the serviceUpstream annotation is enabled
+		if serviceUpstream {
+			endpoint, err := ic.getServiceClusterEndpoint(svcKey, ing.Spec.Backend)
+			if err != nil {
+				glog.Errorf("failed to get service cluster endpoint for service %s: %v", svcKey, err)
+			} else {
+				upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint}
+			}
 		}
+
+		if len(upstreams[defBackend].Endpoints) == 0 {
+			endps, err := ic.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String(), hz)
+			upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...)
+			if err != nil {
+				glog.Warningf("error creating upstream %v: %v", defBackend, err)
+			}
+		}
+
 	}

 	for _, rule := range ing.Spec.Rules {
@@ -778,27 +951,37 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
 				glog.V(3).Infof("creating upstream %v", name)
 				upstreams[name] = newUpstream(name)
+				upstreams[name].Port = path.Backend.ServicePort
+
 				if !upstreams[name].Secure {
 					upstreams[name].Secure = secUpstream.Secure
 				}
+
 				if upstreams[name].SecureCACert.Secret == "" {
 					upstreams[name].SecureCACert = secUpstream.CACert
 				}
-				if upstreams[name].SessionAffinity.AffinityType == "" {
-					upstreams[name].SessionAffinity.AffinityType = affinity.AffinityType
-					if affinity.AffinityType == "cookie" {
-						upstreams[name].SessionAffinity.CookieSessionAffinity.Name = affinity.CookieConfig.Name
-						upstreams[name].SessionAffinity.CookieSessionAffinity.Hash = affinity.CookieConfig.Hash
+
+				svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
+
+				// Add the service cluster endpoint as the upstream instead of individual endpoints
+				// if the serviceUpstream annotation is enabled
+				if serviceUpstream {
+					endpoint, err := ic.getServiceClusterEndpoint(svcKey, &path.Backend)
+					if err != nil {
+						glog.Errorf("failed to get service cluster endpoint for service %s: %v", svcKey, err)
+					} else {
+						upstreams[name].Endpoints = []ingress.Endpoint{endpoint}
 					}
 				}

-				svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
-				endp, err := ic.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), hz)
-				if err != nil {
-					glog.Warningf("error obtaining service endpoints: %v", err)
-					continue
+				if len(upstreams[name].Endpoints) == 0 {
+					endp, err := ic.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), hz)
+					if err != nil {
+						glog.Warningf("error obtaining service endpoints: %v", err)
+						continue
+					}
+					upstreams[name].Endpoints = endp
 				}
-				upstreams[name].Endpoints = endp

 				s, exists, err := ic.svcLister.Store.GetByKey(svcKey)
 				if err != nil {
@@ -806,12 +989,12 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
 					continue
 				}

-				if exists {
-					upstreams[name].Service = s.(*api.Service)
-				} else {
+				if !exists {
 					glog.Warningf("service %v does not exist", svcKey)
+					continue
 				}
-				upstreams[name].Port = path.Backend.ServicePort
+
+				upstreams[name].Service = s.(*api.Service)
 			}
 		}
 	}
@@ -819,6 +1002,24 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
 	return upstreams
 }

+func (ic *GenericController) getServiceClusterEndpoint(svcKey string, backend *extensions.IngressBackend) (endpoint ingress.Endpoint, err error) {
+	svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey)
+	if err != nil {
+		return endpoint, err
+	}
+
+	if !svcExists {
+		return endpoint, fmt.Errorf("service %v does not exist", svcKey)
+	}
+
+	svc :=
svcObj.(*api.Service)
+	if svc.Spec.ClusterIP == "" {
+		return endpoint, fmt.Errorf("no ClusterIP found for service %s", svcKey)
+	}
+
+	endpoint.Address = svc.Spec.ClusterIP
+	endpoint.Port = backend.ServicePort.String()
+
+	return endpoint, nil
+}
+
 // serviceEndpoints returns the upstream servers (endpoints) associated
 // to a service.
 func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
@@ -848,12 +1049,22 @@ func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
 			glog.Warningf("service %v does not have any active endpoints", svcKey)
 		}

-		sort.Sort(ingress.EndpointByAddrPort(endps))
+		if ic.cfg.SortBackends {
+			sort.Sort(ingress.EndpointByAddrPort(endps))
+		}
 		upstreams = append(upstreams, endps...)
 		break
 		}
 	}

+	if !ic.cfg.SortBackends {
+		rand.Seed(time.Now().UnixNano())
+		for i := range upstreams {
+			j := rand.Intn(i + 1)
+			upstreams[i], upstreams[j] = upstreams[j], upstreams[i]
+		}
+	}
+
 	return upstreams, nil
 }

@@ -874,39 +1085,21 @@ func (ic *GenericController) createServers(data []interface{},
 		BufferSize:   bdef.ProxyBufferSize,
 		CookieDomain: bdef.ProxyCookieDomain,
 		CookiePath:   bdef.ProxyCookiePath,
+		NextUpstream: bdef.ProxyNextUpstream,
 	}

-	// This adds the Default Certificate to Default Backend (or generates a new self signed one)
-	var defaultPemFileName, defaultPemSHA string
+	defaultPemFileName := fakeCertificatePath
+	defaultPemSHA := fakeCertificateSHA

 	// Tries to fetch the default Certificate. If it does not exist, generate a new self-signed one.
 	defaultCertificate, err := ic.getPemCertificate(ic.cfg.DefaultSSLCertificate)
-	if err != nil {
-		// This means the Default Secret does not exists, so we will create a new one.
-		fakeCertificate := "default-fake-certificate"
-		fakeCertificatePath := fmt.Sprintf("%v/%v.pem", ingress.DefaultSSLDirectory, fakeCertificate)
-
-		// Only generates a new certificate if it doesn't exists physically
-		_, err := os.Stat(fakeCertificatePath)
-		if err != nil {
-			glog.V(3).Infof("No Default SSL Certificate found.
Generating a new one") - defCert, defKey := ssl.GetFakeSSLCert() - defaultCertificate, err = ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}) - if err != nil { - glog.Fatalf("Error generating self signed certificate: %v", err) - } - defaultPemFileName = defaultCertificate.PemFileName - defaultPemSHA = defaultCertificate.PemSHA - } else { - defaultPemFileName = fakeCertificatePath - defaultPemSHA = ssl.PemSHA1(fakeCertificatePath) - } - } else { + if err == nil { defaultPemFileName = defaultCertificate.PemFileName defaultPemSHA = defaultCertificate.PemSHA } // initialize the default server + du := ic.getDefaultUpstream() servers[defServerName] = &ingress.Server{ Hostname: defServerName, SSLCertificate: defaultPemFileName, @@ -915,8 +1108,9 @@ func (ic *GenericController) createServers(data []interface{}, { Path: rootLocation, IsDefBackend: true, - Backend: ic.getDefaultUpstream().Name, + Backend: du.Name, Proxy: ngxProxy, + Service: du.Service, }, }} @@ -929,12 +1123,13 @@ func (ic *GenericController) createServers(data []interface{}, // check if ssl passthrough is configured sslpt := ic.annotations.SSLPassthrough(ing) - dun := ic.getDefaultUpstream().Name + du := ic.getDefaultUpstream() + un := du.Name if ing.Spec.Backend != nil { // replace default backend defUpstream := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort.String()) if backendUpstream, ok := upstreams[defUpstream]; ok { - dun = backendUpstream.Name + un = backendUpstream.Name } } @@ -954,76 +1149,86 @@ func (ic *GenericController) createServers(data []interface{}, { Path: rootLocation, IsDefBackend: true, - Backend: dun, + Backend: un, Proxy: ngxProxy, + Service: &api.Service{}, }, }, SSLPassthrough: sslpt} } } - // configure default location and SSL + // configure default location, alias, and SSL for _, ingIf := range data { ing := ingIf.(*extensions.Ingress) if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) { continue } + // setup server-alias based on annotations + aliasAnnotation := ic.annotations.Alias(ing) + for _, rule := range ing.Spec.Rules { host := rule.Host if host == "" { host = defServerName } + // setup server aliases + servers[host].Alias = aliasAnnotation + // only add a certificate if the server does not have one previously configured - // TODO: TLS without secret? 
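// A note on the alias wiring above: servers[host].Alias is filled from a
// per-Ingress annotation. A minimal sketch of an Ingress using it, assuming
// the annotation key consumed by ic.annotations.Alias is
// "ingress.kubernetes.io/server-alias" (hypothetical name here):
//
//	apiVersion: extensions/v1beta1
//	kind: Ingress
//	metadata:
//	  name: example
//	  annotations:
//	    ingress.kubernetes.io/server-alias: "www.example.com"
//	spec:
//	  rules:
//	  - host: example.com
//	    http:
//	      paths:
//	      - path: /
//	        backend:
//	          serviceName: example
//	          servicePort: 80
//
// The generated server then answers for both example.com and the alias; the
// conflict check in getBackendServers clears the alias when another server
// already owns that hostname.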
-		if len(ing.Spec.TLS) > 0 && servers[host].SSLCertificate == "" {
-			tlsSecretName := ""
-			found := false
-			for _, tls := range ing.Spec.TLS {
-				for _, tlsHost := range tls.Hosts {
-					if tlsHost == host {
-						tlsSecretName = tls.SecretName
-						found = true
-						break
-					}
-				}
+			if servers[host].SSLCertificate != "" {
+				continue
+			}
+
+			if len(ing.Spec.TLS) == 0 {
+				glog.V(3).Infof("ingress %v/%v for host %v does not contain a TLS section", ing.Namespace, ing.Name, host)
+				continue
+			}
+
+			tlsSecretName := ""
+			found := false
+			for _, tls := range ing.Spec.TLS {
+				if sets.NewString(tls.Hosts...).Has(host) {
+					tlsSecretName = tls.SecretName
+					found = true
+					break
 				}
+			}

-			// the current ing.Spec.Rules[].Host doesn't have an entry at
-			// ing.Spec.TLS[].Hosts[], skipping to the next Rule
-			if !found {
-				continue
-			}
+			if !found {
+				glog.Warningf("ingress %v/%v for host %v contains a TLS section but none of the hosts match",
+					ing.Namespace, ing.Name, host)
+				continue
+			}

-			// Current Host listed on ing.Spec.TLS[].Hosts[]
-			// but TLS[].SecretName is empty; using default cert
-			if tlsSecretName == "" {
-				servers[host].SSLCertificate = defaultPemFileName
-				servers[host].SSLPemChecksum = defaultPemSHA
-				continue
-			}
+			if tlsSecretName == "" {
+				glog.V(3).Infof("host %v is listed in the tls section but secretName is empty. Using default cert", host)
+				servers[host].SSLCertificate = defaultPemFileName
+				servers[host].SSLPemChecksum = defaultPemSHA
+				continue
+			}

-			key := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
-			bc, exists := ic.sslCertTracker.Get(key)
-			if exists {
-				cert := bc.(*ingress.SSLCert)
-				if isHostValid(host, cert) {
-					servers[host].SSLCertificate = cert.PemFileName
-					servers[host].SSLPemChecksum = cert.PemSHA
-					servers[host].SSLExpireTime = cert.ExpireTime
+			key := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
+			bc, exists := ic.sslCertTracker.Get(key)
+			if !exists {
+				glog.Warningf("ssl certificate \"%v\" does not exist in local store", key)
+				continue
+			}

-					if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) {
-						glog.Warningf("ssl certificate for host %v is about to expire in 10 days", host)
-					}
+			cert := bc.(*ingress.SSLCert)
+			err = cert.Certificate.VerifyHostname(host)
+			if err != nil {
+				glog.Warningf("ssl certificate %v does not contain a Common Name or Subject Alternative Name for host %v", key, host)
+				continue
+			}

-				} else {
-					glog.Warningf("ssl certificate %v does not contain a common name for host %v", key, host)
-				}
+			servers[host].SSLCertificate = cert.PemFileName
+			servers[host].SSLPemChecksum = cert.PemSHA
+			servers[host].SSLExpireTime = cert.ExpireTime

-				continue
-			}
-
-			glog.Infof("ssl certificate \"%v\" does not exist in local store", key)
+			if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) {
+				glog.Warningf("ssl certificate for host %v is about to expire in 10 days", host)
 			}
 		}
 	}
@@ -1065,7 +1270,7 @@ func (ic *GenericController) getEndpoints(
 	// avoid duplicated upstream servers when the service
 	// contains multiple port definitions sharing the same
 	// targetport.
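// The TLS hunk above replaces the hand-rolled isHostValid/matchHostnames
// helpers with the standard library check. A minimal sketch of the
// equivalent verification, assuming a parsed *x509.Certificate such as the
// new SSLCert.Certificate field:
//
//	import "crypto/x509"
//
//	// hostMatchesCert reports whether host is covered by the certificate.
//	// VerifyHostname returns nil when the host matches a Subject Alternative
//	// Name (or the legacy Common Name), including wildcards like *.cluster.local.
//	func hostMatchesCert(cert *x509.Certificate, host string) bool {
//		return cert.VerifyHostname(host) == nil
//	}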
- adus := make(map[string]bool, 0) + adus := make(map[string]bool) // ExternalName services if s.Spec.Type == api.ServiceTypeExternalName { @@ -1121,6 +1326,7 @@ func (ic *GenericController) getEndpoints( Port: fmt.Sprintf("%v", targetPort), MaxFails: hz.MaxFails, FailTimeout: hz.FailTimeout, + Target: epAddress.TargetRef, } upsServers = append(upsServers, ups) adus[ep] = true @@ -1132,19 +1338,22 @@ func (ic *GenericController) getEndpoints( return upsServers } -// extractSecretNames extracts information about secrets inside the Ingress rule -func (ic GenericController) extractSecretNames(ing *extensions.Ingress) { +// readSecrets extracts information about secrets from an Ingress rule +func (ic *GenericController) readSecrets(ing *extensions.Ingress) { for _, tls := range ing.Spec.TLS { if tls.SecretName == "" { continue } key := fmt.Sprintf("%v/%v", ing.Namespace, tls.SecretName) - _, exists := ic.secretTracker.Get(key) - if !exists { - ic.secretTracker.Add(key, key) - } + ic.syncSecret(key) } + + key, _ := parser.GetStringAnnotation("ingress.kubernetes.io/auth-tls-secret", ing) + if key == "" { + return + } + ic.syncSecret(key) } // Stop stops the loadbalancer controller. @@ -1189,9 +1398,17 @@ func (ic GenericController) Start() { runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync")) } - go ic.syncQueue.Run(10*time.Second, ic.stopCh) + // initial sync of secrets to avoid unnecessary reloads + for _, key := range ic.ingLister.ListKeys() { + if obj, exists, _ := ic.ingLister.GetByKey(key); exists { + ing := obj.(*extensions.Ingress) + ic.readSecrets(ing) + } + } - go wait.Forever(ic.syncSecret, 10*time.Second) + createDefaultSSLCertificate() + + go ic.syncQueue.Run(time.Second, ic.stopCh) if ic.syncStatus != nil { go ic.syncStatus.Run(ic.stopCh) @@ -1199,3 +1416,14 @@ func (ic GenericController) Start() { <-ic.stopCh } + +func createDefaultSSLCertificate() { + defCert, defKey := ssl.GetFakeSSLCert() + c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}) + if err != nil { + glog.Fatalf("Error generating self signed certificate: %v", err) + } + + fakeCertificateSHA = c.PemSHA + fakeCertificatePath = c.PemFileName +} diff --git a/core/pkg/ingress/controller/launch.go b/core/pkg/ingress/controller/launch.go index 9bb162488..fd2403f3a 100644 --- a/core/pkg/ingress/controller/launch.go +++ b/core/pkg/ingress/controller/launch.go @@ -7,6 +7,7 @@ import ( "net/http" "net/http/pprof" "os" + "strings" "syscall" "time" @@ -14,9 +15,9 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/pflag" + api "k8s.io/api/core/v1" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/client-go/kubernetes" - api "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmd_api "k8s.io/client-go/tools/clientcmd/api" @@ -55,18 +56,18 @@ func NewIngressController(backend ingress.Controller) *GenericController { tcpConfigMapName = flags.String("tcp-services-configmap", "", `Name of the ConfigMap that contains the definition of the TCP services to expose. The key in the map indicates the external port to be used. The value is the name of the - service with the format namespace/serviceName and the port of the service could be a + service with the format namespace/serviceName and the port of the service could be a number of the name of the port. The ports 80 and 443 are not allowed as external ports. 
These ports are reserved for the backend`)

	udpConfigMapName = flags.String("udp-services-configmap", "",
		`Name of the ConfigMap that contains the definition of the UDP services to expose.
		The key in the map indicates the external port to be used. The value is the name of the
-		service with the format namespace/serviceName and the port of the service could be a
+		service with the format namespace/serviceName and the port of the service could be a
		number or the name of the port.`)

-	resyncPeriod = flags.Duration("sync-period", 60*time.Second,
-		`Relist and confirm cloud resources this often.`)
+	resyncPeriod = flags.Duration("sync-period", 600*time.Second,
+		`Relist and confirm cloud resources this often. Default is 10 minutes.`)

 	watchNamespace = flags.String("watch-namespace", api.NamespaceAll,
 		`Namespace to watch for Ingress. Default is to watch all namespaces`)
@@ -75,20 +76,30 @@ func NewIngressController(backend ingress.Controller) *GenericController {
 	profiling = flags.Bool("profiling", true, `Enable profiling via web interface host:port/debug/pprof/`)

-	defSSLCertificate = flags.String("default-ssl-certificate", "", `Name of the secret
+	defSSLCertificate = flags.String("default-ssl-certificate", "", `Name of the secret
		that contains an SSL certificate to be used as default for an HTTPS catch-all server`)

-	defHealthzURL = flags.String("health-check-path", "/healthz", `Defines
+	defHealthzURL = flags.String("health-check-path", "/healthz", `Defines
		the URL to be used as health check inside the default server in NGINX.`)

-	updateStatus = flags.Bool("update-status", true, `Indicates if the
+	updateStatus = flags.Bool("update-status", true, `Indicates if the
		ingress controller should update the Ingress status IP/hostname. Default is true`)

 	electionID = flags.String("election-id", "ingress-controller-leader", `Election id to use for status update.`)

 	forceIsolation = flags.Bool("force-namespace-isolation", false,
-		`Force namespace isolation. This flag is required to avoid the reference of secrets or
+		`Force namespace isolation. This flag is required to avoid the reference of secrets or
		configmaps located in a different namespace than the one specified in the flag --watch-namespace.`)
+
+	disableNodeList = flags.Bool("disable-node-list", false,
+		`Disable querying nodes. If --force-namespace-isolation is true, this should also be set.`)
+
+	updateStatusOnShutdown = flags.Bool("update-status-on-shutdown", true, `Indicates if the
+		ingress controller should update the Ingress status IP/hostname when the controller
+		is being stopped. Default is true`)
+
+	sortBackends = flags.Bool("sort-backends", false,
+		`Defines if backends and their endpoints should be sorted`)
 )

 flags.AddGoFlagSet(flag.CommandLine)
@@ -115,6 +126,9 @@ func NewIngressController(backend ingress.Controller) *GenericController {
 	_, err = k8s.IsValidService(kubeClient, *defaultSvc)
 	if err != nil {
+		if strings.Contains(err.Error(), "cannot get services in the namespace") {
+			glog.Fatalf("✖ It seems the cluster is running with Authorization enabled (like RBAC) and there are no permissions for the ingress controller.
Please check the configuration") + } glog.Fatalf("no service with name %v found: %v", *defaultSvc, err) } glog.Infof("validated %v as the default backend", *defaultSvc) @@ -126,11 +140,15 @@ func NewIngressController(backend ingress.Controller) *GenericController { } if len(svc.Status.LoadBalancer.Ingress) == 0 { - // We could poll here, but we instead just exit and rely on k8s to restart us - glog.Fatalf("service %s does not (yet) have ingress points", *publishSvc) + if len(svc.Spec.ExternalIPs) > 0 { + glog.Infof("service %v validated as assigned with externalIP", *publishSvc) + } else { + // We could poll here, but we instead just exit and rely on k8s to restart us + glog.Fatalf("service %s does not (yet) have ingress points", *publishSvc) + } + } else { + glog.Infof("service %v validated as source of Ingress status", *publishSvc) } - - glog.Infof("service %v validated as source of Ingress status", *publishSvc) } if *watchNamespace != "" { @@ -142,6 +160,10 @@ func NewIngressController(backend ingress.Controller) *GenericController { } } + if resyncPeriod.Seconds() < 10 { + glog.Fatalf("resync period (%vs) is too low", resyncPeriod.Seconds()) + } + err = os.MkdirAll(ingress.DefaultSSLDirectory, 0655) if err != nil { glog.Errorf("Failed to mkdir SSL directory: %v", err) @@ -164,6 +186,9 @@ func NewIngressController(backend ingress.Controller) *GenericController { PublishService: *publishSvc, Backend: backend, ForceNamespaceIsolation: *forceIsolation, + DisableNodeList: *disableNodeList, + UpdateStatusOnShutdown: *updateStatusOnShutdown, + SortBackends: *sortBackends, } ic := newIngressController(config) @@ -254,13 +279,21 @@ func createApiserverClient(apiserverHost string, kubeConfig string) (*kubernetes cfg.Burst = defaultBurst cfg.ContentType = "application/vnd.kubernetes.protobuf" - glog.Infof("Creating API server client for %s", cfg.Host) + glog.Infof("Creating API client for %s", cfg.Host) client, err := kubernetes.NewForConfig(cfg) - if err != nil { return nil, err } + + v, err := client.Discovery().ServerVersion() + if err != nil { + return nil, err + } + + glog.Infof("Running in Kubernetes Cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", + v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform) + return client, nil } diff --git a/core/pkg/ingress/controller/named_port.go b/core/pkg/ingress/controller/named_port.go deleted file mode 100644 index 99e43c826..000000000 --- a/core/pkg/ingress/controller/named_port.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/util/intstr" - api_v1 "k8s.io/client-go/pkg/api/v1" -) - -// FindPort locates the container port for the given pod and portName. If the -// targetPort is a number, use that. If the targetPort is a string, look that -// string up in all named ports in all containers in the target pod. If no -// match is found, fail. 
-func findPort(pod *api_v1.Pod, svcPort *api_v1.ServicePort) (int, error) { - portName := svcPort.TargetPort - switch portName.Type { - case intstr.String: - name := portName.StrVal - for _, container := range pod.Spec.Containers { - for _, port := range container.Ports { - if port.Name == name && port.Protocol == svcPort.Protocol { - return int(port.ContainerPort), nil - } - } - } - case intstr.Int: - return portName.IntValue(), nil - } - - return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) -} diff --git a/core/pkg/ingress/controller/named_port_test.go b/core/pkg/ingress/controller/named_port_test.go deleted file mode 100644 index 09637cb25..000000000 --- a/core/pkg/ingress/controller/named_port_test.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" -) - -func buildSimpleClientSet() *fake.Clientset { - return fake.NewSimpleClientset( - &api_v1.PodList{ - Items: []api_v1.Pod{ - { - ObjectMeta: meta_v1.ObjectMeta{ - Name: "foo1", - Namespace: api.NamespaceDefault, - Labels: map[string]string{ - "lable_sig": "foo_pod", - }, - }, - Spec: api_v1.PodSpec{ - NodeName: "foo_node_1", - Containers: []api_v1.Container{ - { - Ports: []api_v1.ContainerPort{ - { - Name: "foo1_named_port_c1", - Protocol: api_v1.ProtocolTCP, - ContainerPort: 80, - }, - }, - }, - }, - }, - }, - { - ObjectMeta: meta_v1.ObjectMeta{ - Name: "foo1", - Namespace: api.NamespaceSystem, - Labels: map[string]string{ - "lable_sig": "foo_pod", - }, - }, - }, - }, - }, - &api_v1.ServiceList{Items: []api_v1.Service{ - { - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: api.NamespaceDefault, - Name: "named_port_test_service", - }, - }, - }}, - ) -} - -func buildGenericController() *GenericController { - return &GenericController{ - cfg: &Configuration{ - Client: buildSimpleClientSet(), - }, - } -} - -func buildService() *api_v1.Service { - return &api_v1.Service{ - ObjectMeta: meta_v1.ObjectMeta{ - Namespace: api.NamespaceSystem, - Name: "named_port_test_service", - }, - Spec: api_v1.ServiceSpec{ - ClusterIP: "10.10.10.10", - }, - } -} diff --git a/core/pkg/ingress/controller/util.go b/core/pkg/ingress/controller/util.go index 4feb882ba..349fe9599 100644 --- a/core/pkg/ingress/controller/util.go +++ b/core/pkg/ingress/controller/util.go @@ -17,105 +17,31 @@ limitations under the License. package controller import ( - "strings" - "unicode/utf8" - "github.com/golang/glog" "github.com/imdario/mergo" + api "k8s.io/api/core/v1" + "k8s.io/ingress/core/pkg/ingress" ) // DeniedKeyName name of the key that contains the reason to deny a location const DeniedKeyName = "Denied" -// newDefaultServer return an BackendServer to be use as default server that returns 503. 
-func newDefaultServer() ingress.Endpoint { - return ingress.Endpoint{Address: "127.0.0.1", Port: "8181"} -} - // newUpstream creates an upstream without servers. func newUpstream(name string) *ingress.Backend { return &ingress.Backend{ Name: name, Endpoints: []ingress.Endpoint{}, + Service: &api.Service{}, + SessionAffinity: ingress.SessionAffinityConfig{ + CookieSessionAffinity: ingress.CookieSessionAffinity{ + Locations: make(map[string][]string), + }, + }, } } -func isHostValid(host string, cert *ingress.SSLCert) bool { - if cert == nil { - return false - } - - lowered := toLowerCaseASCII(host) - for _, cn := range cert.CN { - if matchHostnames(toLowerCaseASCII(cn), lowered) { - return true - } - } - - return false -} - -func matchHostnames(pattern, host string) bool { - host = strings.TrimSuffix(host, ".") - pattern = strings.TrimSuffix(pattern, ".") - - if len(pattern) == 0 || len(host) == 0 { - return false - } - - patternParts := strings.Split(pattern, ".") - hostParts := strings.Split(host, ".") - - if len(patternParts) != len(hostParts) { - return false - } - - for i, patternPart := range patternParts { - if i == 0 && patternPart == "*" { - continue - } - if patternPart != hostParts[i] { - return false - } - } - - return true -} - -// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use -// an explicitly ASCII function to avoid any sharp corners resulting from -// performing Unicode operations on DNS labels. -func toLowerCaseASCII(in string) string { - // If the string is already lower-case then there's nothing to do. - isAlreadyLowerCase := true - for _, c := range in { - if c == utf8.RuneError { - // If we get a UTF-8 error then there might be - // upper-case ASCII bytes in the invalid sequence. - isAlreadyLowerCase = false - break - } - if 'A' <= c && c <= 'Z' { - isAlreadyLowerCase = false - break - } - } - - if isAlreadyLowerCase { - return in - } - - out := []byte(in) - for i, c := range out { - if 'A' <= c && c <= 'Z' { - out[i] += 'a' - 'A' - } - } - return string(out) -} - func mergeLocationAnnotations(loc *ingress.Location, anns map[string]interface{}) { if _, ok := anns[DeniedKeyName]; ok { loc.Denied = anns[DeniedKeyName].(error) diff --git a/core/pkg/ingress/controller/util_test.go b/core/pkg/ingress/controller/util_test.go index 2aa6d5a20..3847cb2ba 100644 --- a/core/pkg/ingress/controller/util_test.go +++ b/core/pkg/ingress/controller/util_test.go @@ -23,10 +23,10 @@ import ( "k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress/annotations/auth" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" - "k8s.io/ingress/core/pkg/ingress/annotations/authtls" "k8s.io/ingress/core/pkg/ingress/annotations/ipwhitelist" "k8s.io/ingress/core/pkg/ingress/annotations/proxy" "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" + "k8s.io/ingress/core/pkg/ingress/annotations/redirect" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" ) @@ -36,57 +36,6 @@ func (fe *fakeError) Error() string { return "fakeError" } -func TestIsHostValid(t *testing.T) { - fkCert := &ingress.SSLCert{ - CAFileName: "foo", - PemFileName: "foo.cr", - PemSHA: "perha", - CN: []string{ - "*.cluster.local", "default.local", - }, - } - - fooTests := []struct { - cr *ingress.SSLCert - host string - er bool - }{ - {nil, "foo1.cluster.local", false}, - {fkCert, "foo1.cluster.local", true}, - {fkCert, "default.local", true}, - {fkCert, "foo2.cluster.local.t", false}, - {fkCert, "", false}, - } - - for _, foo := range fooTests { - r := isHostValid(foo.host, foo.cr) - 
if r != foo.er { - t.Errorf("Returned %v but expected %v for foo=%v", r, foo.er, foo) - } - } -} - -func TestMatchHostnames(t *testing.T) { - fooTests := []struct { - pattern string - host string - er bool - }{ - {"*.cluster.local.", "foo1.cluster.local.", true}, - {"foo1.cluster.local.", "foo2.cluster.local.", false}, - {"cluster.local.", "foo1.cluster.local.", false}, - {".", "foo1.cluster.local.", false}, - {"cluster.local.", ".", false}, - } - - for _, foo := range fooTests { - r := matchHostnames(foo.pattern, foo.host) - if r != foo.er { - t.Errorf("Returned %v but expected %v for foo=%v", r, foo.er, foo) - } - } -} - func TestMergeLocationAnnotations(t *testing.T) { // initial parameters loc := ingress.Location{} @@ -99,10 +48,10 @@ func TestMergeLocationAnnotations(t *testing.T) { "EnableCORS": true, "ExternalAuth": authreq.External{}, "RateLimit": ratelimit.RateLimit{}, - "Redirect": rewrite.Redirect{}, + "Redirect": redirect.Redirect{}, + "Rewrite": rewrite.Redirect{}, "Whitelist": ipwhitelist.SourceRange{}, "Proxy": proxy.Configuration{}, - "CertificateAuth": authtls.AuthSSLConfig{}, "UsePortInRedirects": true, } diff --git a/core/pkg/ingress/defaults/main.go b/core/pkg/ingress/defaults/main.go index 92f5a72a2..20b0a89fb 100644 --- a/core/pkg/ingress/defaults/main.go +++ b/core/pkg/ingress/defaults/main.go @@ -49,6 +49,17 @@ type Backend struct { // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain ProxyCookieDomain string `json:"proxy-cookie-domain"` + // Specifies in which cases a request should be passed to the next server. + // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream + ProxyNextUpstream string `json:"proxy-next-upstream"` + + // Parameters for proxy-pass directive (eg. Apache web server). + ProxyPassParams string `json:"proxy-pass-params"` + + // Enables or disables buffering of a client request body. + // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering + ProxyRequestBuffering string `json:"proxy-request-buffering"` + // Name server/s used to resolve names of upstream servers into IP addresses. // The file /etc/resolv.conf is used as DNS resolution configuration. Resolver []net.IP @@ -84,4 +95,15 @@ type Backend struct { // WhitelistSourceRange allows limiting access to certain client addresses // http://nginx.org/en/docs/http/ngx_http_access_module.html WhitelistSourceRange []string `json:"whitelist-source-range,-"` + + // Limits the rate of response transmission to a client. + // The rate is specified in bytes per second. The zero value disables rate limiting. + // The limit is set per a request, and so if a client simultaneously opens two connections, + // the overall rate will be twice as much as the specified limit. + // http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate + LimitRate int `json:"limit-rate"` + + // Sets the initial amount after which the further transmission of a response to a client will be rate limited. 
+	// http://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after
+	LimitRateAfter int `json:"limit-rate-after"`
 }
diff --git a/core/pkg/ingress/errors/errors.go b/core/pkg/ingress/errors/errors.go
index 635df3944..1a9db9ba4 100644
--- a/core/pkg/ingress/errors/errors.go
+++ b/core/pkg/ingress/errors/errors.go
@@ -83,3 +83,11 @@ func IsInvalidContent(e error) bool {
 	_, ok := e.(InvalidContent)
 	return ok
 }
+
+func New(m string) error {
+	return errors.New(m)
+}
+
+func Errorf(format string, args ...interface{}) error {
+	return errors.Errorf(format, args...)
+}
diff --git a/core/pkg/ingress/resolver/main.go b/core/pkg/ingress/resolver/main.go
index 0a701fb31..b76ff6fc2 100644
--- a/core/pkg/ingress/resolver/main.go
+++ b/core/pkg/ingress/resolver/main.go
@@ -17,7 +17,7 @@ limitations under the License.
 package resolver

 import (
-	api "k8s.io/client-go/pkg/api/v1"
+	api "k8s.io/api/core/v1"

 	"k8s.io/ingress/core/pkg/ingress/defaults"
 )
@@ -41,6 +41,12 @@ type AuthCertificate interface {
 	GetAuthCertificate(string) (*AuthSSLCert, error)
 }

+// Service has a method that searches for services, concatenating
+// the namespace and name using the character /
+type Service interface {
+	GetService(string) (*api.Service, error)
+}
+
 // AuthSSLCert contains the necessary information to do certificate based
 // authentication of an ingress location
 type AuthSSLCert struct {
@@ -51,3 +57,18 @@ type AuthSSLCert struct {
 	// PemSHA contains the SHA1 hash of the 'tls.crt' value
 	PemSHA string `json:"pemSha"`
 }
+
+// Equal tests for equality between two AuthSSLCert types
+func (asslc1 *AuthSSLCert) Equal(assl2 *AuthSSLCert) bool {
+	if asslc1.Secret != assl2.Secret {
+		return false
+	}
+	if asslc1.CAFileName != assl2.CAFileName {
+		return false
+	}
+	if asslc1.PemSHA != assl2.PemSHA {
+		return false
+	}
+
+	return true
+}
diff --git a/core/pkg/ingress/sort_ingress.go b/core/pkg/ingress/sort_ingress.go
index a777683a1..2de09cd26 100644
--- a/core/pkg/ingress/sort_ingress.go
+++ b/core/pkg/ingress/sort_ingress.go
@@ -17,9 +17,11 @@ limitations under the License.
 package ingress

 import (
+	"crypto/x509"
+	"time"
+
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"time"
 )

 // BackendByNameServers sorts upstreams by name
@@ -71,6 +73,7 @@ func (c LocationByPath) Less(i, j int) bool {
 // SSLCert describes a SSL certificate to be used in a server
 type SSLCert struct {
 	meta_v1.ObjectMeta `json:"metadata,omitempty"`
+	Certificate *x509.Certificate `json:"certificate,omitempty"`
 	// CAFileName contains the path to the file with the root certificate
 	CAFileName string `json:"caFileName"`
 	// PemFileName contains the path to the file with the certificate and key concatenated
diff --git a/core/pkg/ingress/status/election.go b/core/pkg/ingress/status/election.go
deleted file mode 100644
index f1cd6e866..000000000
--- a/core/pkg/ingress/status/election.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package status - -import ( - "encoding/json" - "os" - "time" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - client "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - api "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/tools/record" - - "k8s.io/ingress/core/pkg/ingress/status/leaderelection" - "k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock" -) - -func getCurrentLeader(electionID, namespace string, c client.Interface) (string, *api.Endpoints, error) { - endpoints, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{}) - if err != nil { - return "", nil, err - } - val, found := endpoints.Annotations[resourcelock.LeaderElectionRecordAnnotationKey] - if !found { - return "", endpoints, nil - } - electionRecord := resourcelock.LeaderElectionRecord{} - if err = json.Unmarshal([]byte(val), &electionRecord); err != nil { - return "", nil, err - } - return electionRecord.HolderIdentity, endpoints, err -} - -// NewElection creates an election. 'namespace'/'election' should be an existing Kubernetes Service -// 'id' is the id if this leader, should be unique. -func NewElection(electionID, - id, - namespace string, - ttl time.Duration, - callback func(leader string), - c client.Interface) (*leaderelection.LeaderElector, error) { - - _, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - _, err = c.Core().Endpoints(namespace).Create(&api.Endpoints{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: electionID, - }, - }) - if err != nil && !errors.IsConflict(err) { - return nil, err - } - } else { - return nil, err - } - } - - callbacks := leaderelection.LeaderCallbacks{ - OnStartedLeading: func(stop <-chan struct{}) { - callback(id) - }, - OnStoppedLeading: func() { - leader, _, err := getCurrentLeader(electionID, namespace, c) - if err != nil { - glog.Errorf("failed to get leader: %v", err) - // empty string means leader is unknown - callback("") - return - } - callback(leader) - }, - } - - broadcaster := record.NewBroadcaster() - hostname, err := os.Hostname() - if err != nil { - return nil, err - } - recorder := broadcaster.NewRecorder(scheme.Scheme, api.EventSource{ - Component: "ingress-leader-elector", - Host: hostname, - }) - - lock := resourcelock.ConfigMapLock{ - ConfigMapMeta: meta_v1.ObjectMeta{Namespace: namespace, Name: electionID}, - Client: c, - LockConfig: resourcelock.ResourceLockConfig{ - Identity: id, - EventRecorder: recorder, - }, - } - - config := leaderelection.LeaderElectionConfig{ - Lock: &lock, - LeaseDuration: ttl, - RenewDeadline: ttl / 2, - RetryPeriod: ttl / 4, - Callbacks: callbacks, - } - - return leaderelection.NewLeaderElector(config) -} diff --git a/core/pkg/ingress/status/election_test.go b/core/pkg/ingress/status/election_test.go deleted file mode 100644 index 3e6504d00..000000000 --- a/core/pkg/ingress/status/election_test.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "encoding/json" - "testing" - "time" - - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - - "k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock" -) - -func TestGetCurrentLeaderLeaderExist(t *testing.T) { - fkER := resourcelock.LeaderElectionRecord{ - HolderIdentity: "currentLeader", - LeaseDurationSeconds: 30, - AcquireTime: meta_v1.NewTime(time.Now()), - RenewTime: meta_v1.NewTime(time.Now()), - LeaderTransitions: 3, - } - leaderInfo, _ := json.Marshal(fkER) - fkEndpoints := api_v1.Endpoints{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "ingress-controller-test", - Namespace: api.NamespaceSystem, - Annotations: map[string]string{ - resourcelock.LeaderElectionRecordAnnotationKey: string(leaderInfo), - }, - }, - } - fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}}) - identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) - if err != nil { - t.Fatalf("expected identitiy and endpoints but returned error %s", err) - } - - if endpoints == nil { - t.Fatalf("returned nil but expected an endpoints") - } - - if identity != "currentLeader" { - t.Fatalf("returned %v but expected %v", identity, "currentLeader") - } -} - -func TestGetCurrentLeaderLeaderNotExist(t *testing.T) { - fkEndpoints := api_v1.Endpoints{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "ingress-controller-test", - Namespace: api.NamespaceSystem, - Annotations: map[string]string{}, - }, - } - fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}}) - identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - if endpoints == nil { - t.Fatalf("returned nil but expected an endpoints") - } - - if identity != "" { - t.Fatalf("returned %s but expected %s", identity, "") - } -} - -func TestGetCurrentLeaderAnnotationError(t *testing.T) { - fkEndpoints := api_v1.Endpoints{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "ingress-controller-test", - Namespace: api.NamespaceSystem, - Annotations: map[string]string{ - resourcelock.LeaderElectionRecordAnnotationKey: "just-test-error-leader-annotation", - }, - }, - } - fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}}) - _, _, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk) - if err == nil { - t.Errorf("expected error") - } -} - -func TestNewElection(t *testing.T) { - fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{ - { - ObjectMeta: meta_v1.ObjectMeta{ - Name: "ingress-controller-test", - Namespace: api.NamespaceSystem, - }, - }, - { - ObjectMeta: meta_v1.ObjectMeta{ - Name: "ingress-controller-test-020", - Namespace: api.NamespaceSystem, - }, - }, - }}) - - ne, err := NewElection("ingress-controller-test", "startLeader", api.NamespaceSystem, 4*time.Second, func(leader string) { - // do nothing - go t.Logf("execute callback fun, leader is: %s", leader) - }, fk) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - - if ne == nil { - t.Fatalf("unexpected nil") - } -} diff --git a/core/pkg/ingress/status/status.go b/core/pkg/ingress/status/status.go index aa20b4728..219642d69 100644 --- a/core/pkg/ingress/status/status.go +++ 
b/core/pkg/ingress/status/status.go
@@ -19,21 +19,25 @@ package status
 import (
 	"fmt"
 	"net"
+	"os"
 	"sort"
 	"sync"
 	"time"

 	"github.com/golang/glog"

+	v1 "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	api_v1 "k8s.io/client-go/pkg/api/v1"
-	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/leaderelection"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	"k8s.io/client-go/tools/record"

 	"k8s.io/ingress/core/pkg/ingress/annotations/class"
-	"k8s.io/ingress/core/pkg/ingress/status/leaderelection"
 	"k8s.io/ingress/core/pkg/ingress/store"
 	"k8s.io/ingress/core/pkg/k8s"
 	"k8s.io/ingress/core/pkg/strings"
@@ -55,10 +59,16 @@ type Config struct {
 	Client         clientset.Interface
 	PublishService string
 	IngressLister  store.IngressLister
-	ElectionID     string
+
+	ElectionID string
+
+	UpdateStatusOnShutdown bool

 	DefaultIngressClass string
 	IngressClass        string
+
+	// CustomIngressStatus allows setting custom values in the Ingress status
+	CustomIngressStatus func(*extensions.Ingress) []v1.LoadBalancerIngress
 }

 // statusSync keeps the status IP in each Ingress rule updated by executing a periodic check
@@ -75,20 +85,21 @@ type statusSync struct {
 	// workqueue used to keep in sync the status IP/s
 	// in the Ingress rules
 	syncQueue *task.Queue
-
-	runLock *sync.Mutex
 }

 // Run starts the loop to keep the status in sync
 func (s statusSync) Run(stopCh <-chan struct{}) {
-	go wait.Forever(s.elector.Run, 0)
-	go s.run()
-
+	go s.elector.Run()
+	go wait.Forever(s.update, updateInterval)
 	go s.syncQueue.Run(time.Second, stopCh)
-
 	<-stopCh
 }

+func (s *statusSync) update() {
+	// send a dummy object to the queue to force a sync
+	s.syncQueue.Enqueue("sync status")
+}
+
 // Shutdown stops the sync. If the instance is the leader it will remove the current IP
 // if there are no other instances running.
func (s statusSync) Shutdown() { @@ -98,9 +109,14 @@ func (s statusSync) Shutdown() { return } + if !s.UpdateStatusOnShutdown { + glog.Warningf("skipping update of status of Ingress rules") + return + } + glog.Infof("updating status of Ingress rules (remove)") - addrs, err := s.runningAddresess() + addrs, err := s.runningAddresses() if err != nil { glog.Errorf("error obtaining running IPs: %v", addrs) return @@ -118,27 +134,10 @@ func (s statusSync) Shutdown() { } glog.Infof("removing address from ingress status (%v)", addrs) - s.updateStatus([]api_v1.LoadBalancerIngress{}) -} - -func (s *statusSync) run() { - err := wait.PollInfinite(updateInterval, func() (bool, error) { - if s.syncQueue.IsShuttingDown() { - return true, nil - } - // send a dummy object to the queue to force a sync - s.syncQueue.Enqueue("dummy") - return false, nil - }) - if err != nil { - glog.Errorf("error waiting shutdown: %v", err) - } + s.updateStatus([]v1.LoadBalancerIngress{}) } func (s *statusSync) sync(key interface{}) error { - s.runLock.Lock() - defer s.runLock.Unlock() - if s.syncQueue.IsShuttingDown() { glog.V(2).Infof("skipping Ingress status update (shutting down in progress)") return nil @@ -149,7 +148,7 @@ func (s *statusSync) sync(key interface{}) error { return nil } - addrs, err := s.runningAddresess() + addrs, err := s.runningAddresses() if err != nil { return err } @@ -158,18 +157,6 @@ func (s *statusSync) sync(key interface{}) error { return nil } -// callback invoked function when a new leader is elected -func (s *statusSync) callback(leader string) { - if s.syncQueue.IsShuttingDown() { - return - } - - glog.V(2).Infof("new leader elected (%v)", leader) - if leader == s.pod.Name { - glog.V(2).Infof("I am the new status update leader") - } -} - func (s statusSync) keyfunc(input interface{}) (interface{}, error) { return input, nil } @@ -182,35 +169,71 @@ func NewStatusSyncer(config Config) Sync { } st := statusSync{ - pod: pod, - runLock: &sync.Mutex{}, - Config: config, + pod: pod, + + Config: config, } st.syncQueue = task.NewCustomTaskQueue(st.sync, st.keyfunc) // we need to use the defined ingress class to allow multiple leaders // in order to update information about ingress status - id := fmt.Sprintf("%v-%v", config.ElectionID, config.DefaultIngressClass) + electionID := fmt.Sprintf("%v-%v", config.ElectionID, config.DefaultIngressClass) if config.IngressClass != "" { - id = fmt.Sprintf("%v-%v", config.ElectionID, config.IngressClass) + electionID = fmt.Sprintf("%v-%v", config.ElectionID, config.IngressClass) } - le, err := NewElection(id, - pod.Name, pod.Namespace, 30*time.Second, - st.callback, config.Client) + callbacks := leaderelection.LeaderCallbacks{ + OnStartedLeading: func(stop <-chan struct{}) { + glog.V(2).Infof("I am the new status update leader") + }, + OnStoppedLeading: func() { + glog.V(2).Infof("I am not status update leader anymore") + }, + OnNewLeader: func(identity string) { + glog.Infof("new leader elected: %v", identity) + }, + } + + broadcaster := record.NewBroadcaster() + hostname, _ := os.Hostname() + + recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{ + Component: "ingress-leader-elector", + Host: hostname, + }) + + lock := resourcelock.ConfigMapLock{ + ConfigMapMeta: meta_v1.ObjectMeta{Namespace: pod.Namespace, Name: electionID}, + Client: config.Client.CoreV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: pod.Name, + EventRecorder: recorder, + }, + } + + ttl := 30 * time.Second + le, err := 
leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
+		Lock:          &lock,
+		LeaseDuration: ttl,
+		RenewDeadline: ttl / 2,
+		RetryPeriod:   ttl / 4,
+		Callbacks:     callbacks,
+	})
+
 	if err != nil {
 		glog.Fatalf("unexpected error starting leader election: %v", err)
 	}
+
 	st.elector = le
 	return st
 }

-// runningAddresess returns a list of IP addresses and/or FQDN where the
+// runningAddresses returns a list of IP addresses and/or FQDN where the
 // ingress controller is currently running
-func (s *statusSync) runningAddresess() ([]string, error) {
+func (s *statusSync) runningAddresses() ([]string, error) {
 	if s.PublishService != "" {
 		ns, name, _ := k8s.ParseNameNS(s.PublishService)
-		svc, err := s.Client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
+		svc, err := s.Client.CoreV1().Services(ns).Get(name, meta_v1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -223,12 +246,15 @@
 				addrs = append(addrs, ip.IP)
 			}
 		}
+		for _, ip := range svc.Spec.ExternalIPs {
+			addrs = append(addrs, ip)
+		}

 		return addrs, nil
 	}

 	// get information about all the pods running the ingress controller
-	pods, err := s.Client.Core().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
+	pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
 		LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
 	})
 	if err != nil {
@@ -246,7 +272,7 @@
 }

 func (s *statusSync) isRunningMultiplePods() bool {
-	pods, err := s.Client.Core().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
+	pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
 		LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
 	})
 	if err != nil {
@@ -257,13 +283,13 @@
 }

 // sliceToStatus converts a slice of IP and/or hostnames to LoadBalancerIngress
-func sliceToStatus(endpoints []string) []api_v1.LoadBalancerIngress {
-	lbi := []api_v1.LoadBalancerIngress{}
+func sliceToStatus(endpoints []string) []v1.LoadBalancerIngress {
+	lbi := []v1.LoadBalancerIngress{}
 	for _, ep := range endpoints {
 		if net.ParseIP(ep) == nil {
-			lbi = append(lbi, api_v1.LoadBalancerIngress{Hostname: ep})
+			lbi = append(lbi, v1.LoadBalancerIngress{Hostname: ep})
 		} else {
-			lbi = append(lbi, api_v1.LoadBalancerIngress{IP: ep})
+			lbi = append(lbi, v1.LoadBalancerIngress{IP: ep})
 		}
 	}

@@ -271,7 +297,10 @@
 	return lbi
 }

-func (s *statusSync) updateStatus(newIPs []api_v1.LoadBalancerIngress) {
+// updateStatus changes the status information of Ingress rules.
+// If the backend's CustomIngressStatus function returns a non-nil value,
+// that value is used instead of the newIngressPoint values.
+func (s *statusSync) updateStatus(newIngressPoint []v1.LoadBalancerIngress) {
 	ings := s.IngressLister.List()
 	var wg sync.WaitGroup
 	wg.Add(len(ings))
@@ -292,15 +321,21 @@
 			return
 		}

+		addrs := newIngressPoint
+		ca := s.CustomIngressStatus(currIng)
+		if ca != nil {
+			addrs = ca
+		}
+
 		curIPs := currIng.Status.LoadBalancer.Ingress
 		sort.Sort(loadBalancerIngressByIP(curIPs))
-		if ingressSliceEqual(newIPs, curIPs) {
+		if ingressSliceEqual(addrs, curIPs) {
 			glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", currIng.Namespace, currIng.Name)
 			return
 		}

-		glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name,
newIPs) - currIng.Status.LoadBalancer.Ingress = newIPs + glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, addrs) + currIng.Status.LoadBalancer.Ingress = addrs _, err = ingClient.UpdateStatus(currIng) if err != nil { glog.Warningf("error updating ingress rule: %v", err) @@ -311,7 +346,7 @@ func (s *statusSync) updateStatus(newIPs []api_v1.LoadBalancerIngress) { wg.Wait() } -func ingressSliceEqual(lhs, rhs []api_v1.LoadBalancerIngress) bool { +func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool { if len(lhs) != len(rhs) { return false } @@ -328,7 +363,7 @@ func ingressSliceEqual(lhs, rhs []api_v1.LoadBalancerIngress) bool { } // loadBalancerIngressByIP sorts LoadBalancerIngress using the field IP -type loadBalancerIngressByIP []api_v1.LoadBalancerIngress +type loadBalancerIngressByIP []v1.LoadBalancerIngress func (c loadBalancerIngressByIP) Len() int { return len(c) } func (c loadBalancerIngressByIP) Swap(i, j int) { c[i], c[j] = c[j], c[i] } diff --git a/core/pkg/ingress/status/status_test.go b/core/pkg/ingress/status/status_test.go index 4b97c46a9..d248deb84 100644 --- a/core/pkg/ingress/status/status_test.go +++ b/core/pkg/ingress/status/status_test.go @@ -19,16 +19,15 @@ package status import ( "os" "sort" - "sync" "testing" "time" + api_v1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" testclient "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/pkg/api" - api_v1 "k8s.io/client-go/pkg/api/v1" - extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/api" "k8s.io/ingress/core/pkg/ingress/annotations/class" cache_store "k8s.io/ingress/core/pkg/ingress/store" @@ -245,12 +244,14 @@ func buildStatusSync() statusSync { "lable_sig": "foo_pod", }, }, - runLock: &sync.Mutex{}, syncQueue: task.NewTaskQueue(fakeSynFn), Config: Config{ Client: buildSimpleClientSet(), PublishService: api_v1.NamespaceDefault + "/" + "foo", IngressLister: buildIngressListener(), + CustomIngressStatus: func(*extensions.Ingress) []api_v1.LoadBalancerIngress { + return nil + }, }, } } @@ -260,11 +261,15 @@ func TestStatusActions(t *testing.T) { os.Setenv("POD_NAME", "foo1") os.Setenv("POD_NAMESPACE", api_v1.NamespaceDefault) c := Config{ - Client: buildSimpleClientSet(), - PublishService: "", - IngressLister: buildIngressListener(), - DefaultIngressClass: "nginx", - IngressClass: "", + Client: buildSimpleClientSet(), + PublishService: "", + IngressLister: buildIngressListener(), + DefaultIngressClass: "nginx", + IngressClass: "", + UpdateStatusOnShutdown: true, + CustomIngressStatus: func(*extensions.Ingress) []api_v1.LoadBalancerIngress { + return nil + }, } // create object fkSync := NewStatusSyncer(c) @@ -321,9 +326,7 @@ func TestStatusActions(t *testing.T) { } func TestCallback(t *testing.T) { - fk := buildStatusSync() - // do nothing - fk.callback("foo_base_pod") + buildStatusSync() } func TestKeyfunc(t *testing.T) { @@ -342,7 +345,7 @@ func TestKeyfunc(t *testing.T) { func TestRunningAddresessWithPublishService(t *testing.T) { fk := buildStatusSync() - r, _ := fk.runningAddresess() + r, _ := fk.runningAddresses() if r == nil { t.Fatalf("returned nil but expected valid []string") } @@ -356,7 +359,7 @@ func TestRunningAddresessWithPods(t *testing.T) { fk := buildStatusSync() fk.PublishService = "" - r, _ := fk.runningAddresess() + r, _ := fk.runningAddresses() if r == nil { t.Fatalf("returned nil but expected valid []string") } diff --git 
a/core/pkg/ingress/store/main.go b/core/pkg/ingress/store/main.go index 4fb6f7929..182899499 100644 --- a/core/pkg/ingress/store/main.go +++ b/core/pkg/ingress/store/main.go @@ -19,7 +19,7 @@ package store import ( "fmt" - api "k8s.io/client-go/pkg/api/v1" + api "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" ) diff --git a/core/pkg/ingress/type_equals_test.go b/core/pkg/ingress/type_equals_test.go new file mode 100644 index 000000000..26960cb82 --- /dev/null +++ b/core/pkg/ingress/type_equals_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" +) + +func TestEqualConfiguration(t *testing.T) { + ap, _ := filepath.Abs("../../../tests/manifests/configuration-a.json") + a, err := readJSON(ap) + if err != nil { + t.Errorf("unexpected error reading JSON file: %v", err) + } + + bp, _ := filepath.Abs("../../../tests/manifests/configuration-b.json") + b, err := readJSON(bp) + if err != nil { + t.Errorf("unexpected error reading JSON file: %v", err) + } + + cp, _ := filepath.Abs("../../../tests/manifests/configuration-c.json") + c, err := readJSON(cp) + if err != nil { + t.Errorf("unexpected error reading JSON file: %v", err) + } + + if !a.Equal(b) { + t.Errorf("expected equal configurations (configuration-a.json and configuration-b.json)") + } + + if !b.Equal(a) { + t.Errorf("expected equal configurations (configuration-b.json and configuration-a.json)") + } + + if a.Equal(c) { + t.Errorf("expected equal configurations (configuration-a.json and configuration-c.json)") + } +} + +func readJSON(p string) (*Configuration, error) { + f, err := os.Open(p) + if err != nil { + return nil, err + } + + var c Configuration + + d := json.NewDecoder(f) + err = d.Decode(&c) + if err != nil { + return nil, err + } + + return &c, nil +} diff --git a/core/pkg/ingress/types.go b/core/pkg/ingress/types.go index 0edb0907a..c0d0dbc0c 100644 --- a/core/pkg/ingress/types.go +++ b/core/pkg/ingress/types.go @@ -17,11 +17,14 @@ limitations under the License. 
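The status syncer above now derives all three leader-election timings from a single TTL. Below is a minimal, self-contained sketch of that wiring against client-go's leaderelection package; the lock construction and the callback bodies are assumptions for illustration, not code from this patch.

```go
package status

import (
	"time"

	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// newElector mirrors the configuration used by the status syncer: one TTL
// drives lease duration, renew deadline, and retry period. The lock (for
// example an Endpoints-backed resource lock) is prepared by the caller.
func newElector(lock resourcelock.Interface, ttl time.Duration) (*leaderelection.LeaderElector, error) {
	return leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: ttl,     // how long a lease is valid before it can be taken over
		RenewDeadline: ttl / 2, // the current leader must renew within this window
		RetryPeriod:   ttl / 4, // delay between acquire/renew attempts
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(stop <-chan struct{}) {
				// this replica is now responsible for publishing Ingress status
			},
			OnStoppedLeading: func() {
				// another replica took over; stop publishing
			},
		},
	})
}
```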
package ingress import ( + "time" + "github.com/spf13/pflag" + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/server/healthz" - api "k8s.io/client-go/pkg/api/v1" "k8s.io/ingress/core/pkg/ingress/annotations/auth" "k8s.io/ingress/core/pkg/ingress/annotations/authreq" @@ -29,11 +32,11 @@ import ( "k8s.io/ingress/core/pkg/ingress/annotations/ipwhitelist" "k8s.io/ingress/core/pkg/ingress/annotations/proxy" "k8s.io/ingress/core/pkg/ingress/annotations/ratelimit" + "k8s.io/ingress/core/pkg/ingress/annotations/redirect" "k8s.io/ingress/core/pkg/ingress/annotations/rewrite" "k8s.io/ingress/core/pkg/ingress/defaults" "k8s.io/ingress/core/pkg/ingress/resolver" "k8s.io/ingress/core/pkg/ingress/store" - "time" ) var ( @@ -73,8 +76,6 @@ type Controller interface { // https://k8s.io/ingress/core/blob/master/pkg/ingress/types.go#L83 // The backend returns an error if was not possible to update the configuration. // - // The returned configuration is then passed to test, and then to reload - // if there is no errors. OnUpdate(Configuration) error // ConfigMap content of --configmap SetConfig(*api.ConfigMap) @@ -93,6 +94,14 @@ type Controller interface { OverrideFlags(*pflag.FlagSet) // DefaultIngressClass just return the default ingress class DefaultIngressClass() string + // UpdateIngressStatus custom callback used to update the status in an Ingress rule + // This allows custom implementations + // If the function returns nil the standard functions will be executed. + UpdateIngressStatus(*extensions.Ingress) []api.LoadBalancerIngress + // DefaultEndpoint returns the Endpoint to use as default when the + // referenced service does not exists. This should return the content + // of to the default backend + DefaultEndpoint() Endpoint } // StoreLister returns the configured stores for ingresses, services, @@ -125,9 +134,9 @@ type BackendInfo struct { type Configuration struct { // Backends are a list of backends used by all the Ingress rules in the // ingress controller. This list includes the default backend - Backends []*Backend `json:"namespace"` + Backends []*Backend `json:"backends,omitEmpty"` // Servers - Servers []*Server `json:"servers"` + Servers []*Server `json:"servers,omitEmpty"` // TCPEndpoints contain endpoints for tcp streams handled by this backend // +optional TCPEndpoints []L4Service `json:"tcpEndpoints,omitempty"` @@ -141,10 +150,11 @@ type Configuration struct { } // Backend describes one or more remote server/s (endpoints) associated with a service +// +k8s:deepcopy-gen=true type Backend struct { // Name represents an unique api.Service name formatted as -- Name string `json:"name"` - Service *api.Service `json:"service"` + Service *api.Service `json:"service,omitempty"` Port intstr.IntOrString `json:"port"` // This indicates if the communication protocol between the backend and the endpoint is HTTP or HTTPS // Allowing the use of HTTPS @@ -153,13 +163,13 @@ type Backend struct { Secure bool `json:"secure"` // SecureCACert has the filename and SHA1 of the certificate authorities used to validate // a secured connection to the backend - SecureCACert resolver.AuthSSLCert `json:"secureCert"` + SecureCACert resolver.AuthSSLCert `json:"secureCACert"` // SSLPassthrough indicates that Ingress controller will delegate TLS termination to the endpoints. 
SSLPassthrough bool `json:"sslPassthrough"` // Endpoints contains the list of endpoints currently running - Endpoints []Endpoint `json:"endpoints"` + Endpoints []Endpoint `json:"endpoints,omitempty"` // StickySessionAffinitySession contains the StickyConfig object with stickness configuration - SessionAffinity SessionAffinityConfig + SessionAffinity SessionAffinityConfig `json:"sessionAffinityConfig"` } // SessionAffinityConfig describes different affinity configurations for new sessions. @@ -168,18 +178,22 @@ type Backend struct { // restarts. Exactly one of these values will be set on the upstream, since multiple // affinity values are incompatible. Once set, the backend makes no guarantees // about honoring updates. +// +k8s:deepcopy-gen=true type SessionAffinityConfig struct { - AffinityType string `json:"name"` - CookieSessionAffinity CookieSessionAffinity + AffinityType string `json:"name"` + CookieSessionAffinity CookieSessionAffinity `json:"cookieSessionAffinity"` } // CookieSessionAffinity defines the structure used in Affinity configured by Cookies. +// +k8s:deepcopy-gen=true type CookieSessionAffinity struct { - Name string `json:"name"` - Hash string `json:"hash"` + Name string `json:"name"` + Hash string `json:"hash"` + Locations map[string][]string `json:"locations,omitempty"` } // Endpoint describes a kubernetes endpoint in a backend +// +k8s:deepcopy-gen=true type Endpoint struct { // Address IP address of the endpoint Address string `json:"address"` @@ -193,6 +207,8 @@ type Endpoint struct { // of unsuccessful attempts to communicate with the server should happen // to consider the endpoint unavailable FailTimeout int `json:"failTimeout"` + // Target returns a reference to the object providing the endpoint + Target *api.ObjectReference `json:"target,omipempty"` } // Server describes a website @@ -216,6 +232,13 @@ type Server struct { ServerSnippet string `json:"serverSnippet"` // Locations list of URIs configured in the server. Locations []*Location `json:"locations,omitempty"` + // Alias return the alias of the server name + Alias string `json:"alias,omitempty"` + // RedirectFromToWWW returns if a redirect to/from prefix www is required + RedirectFromToWWW bool `json:"redirectFromToWWW,omitempty"` + // CertificateAuth indicates the this server requires mutual authentication + // +optional + CertificateAuth authtls.AuthSSLConfig `json:"certificateAuth"` } // Location describes an URI inside a server. @@ -227,7 +250,6 @@ type Server struct { // In some cases when more than one annotations is defined a particular order in the execution // is required. // The chain in the execution order of annotations should be: -// - CertificateAuth // - Whitelist // - RateLimit // - BasicDigestAuth @@ -246,18 +268,25 @@ type Location struct { // contains active endpoints or not. Returning true means the location // uses the default backend. IsDefBackend bool `json:"isDefBackend"` + // Ingress returns the ingress from which this location was generated + Ingress *extensions.Ingress `json:"ingress"` // Backend describes the name of the backend to use. Backend string `json:"backend"` - - Service *api.Service `json:"service"` - Port intstr.IntOrString `json:"port"` + // Service describes the referenced services from the ingress + Service *api.Service `json:"service,omitempty"` + // Port describes to which port from the service + Port intstr.IntOrString `json:"port"` + // Overwrite the Host header passed into the backend. Defaults to + // vhost of the incoming request. 
+ // +optional + UpstreamVhost string `json:"upstream-vhost"` // BasicDigestAuth returns authentication configuration for // an Ingress rule. // +optional BasicDigestAuth auth.BasicDigest `json:"basicDigestAuth,omitempty"` // Denied returns an error when this location cannot not be allowed // Requesting a denied location should return HTTP code 403. - Denied error + Denied error `json:"denied,omitempty"` // EnableCORS indicates if path must support CORS // +optional EnableCORS bool `json:"enableCors,omitempty"` @@ -270,9 +299,12 @@ type Location struct { // The Redirect annotation precedes RateLimit // +optional RateLimit ratelimit.RateLimit `json:"rateLimit,omitempty"` - // Redirect describes the redirection this location. + // Redirect describes a temporal o permanent redirection this location. // +optional - Redirect rewrite.Redirect `json:"redirect,omitempty"` + Redirect redirect.Redirect `json:"redirect,omitempty"` + // Rewrite describes the redirection this location. + // +optional + Rewrite rewrite.Redirect `json:"rewrite,omitempty"` // Whitelist indicates only connections from certain client // addresses or networks are allowed. // +optional @@ -281,16 +313,19 @@ type Location struct { // to be used in connections against endpoints // +optional Proxy proxy.Configuration `json:"proxy,omitempty"` - // CertificateAuth indicates the access to this location requires - // external authentication - // +optional - CertificateAuth authtls.AuthSSLConfig `json:"certificateAuth,omitempty"` // UsePortInRedirects indicates if redirects must specify the port // +optional - UsePortInRedirects bool `json:"use-port-in-redirects"` + UsePortInRedirects bool `json:"usePortInRedirects"` // ConfigurationSnippet contains additional configuration for the backend // to be considered in the configuration of the location - ConfigurationSnippet string `json:"configuration-snippet"` + ConfigurationSnippet string `json:"configurationSnippet"` + // ClientBodyBufferSize allows for the configuration of the client body + // buffer size for a specific location. + // +optional + ClientBodyBufferSize string `json:"clientBodyBufferSize,omitempty"` + // DefaultBackend allows the use of a custom default backend for this location. + // +optional + DefaultBackend *api.Service `json:"defaultBackend,omitempty"` } // SSLPassthroughBackend describes a SSL upstream server configured @@ -298,7 +333,7 @@ type Location struct { // The endpoints must provide the TLS termination exposing the required SSL certificate. // The ingress controller only pipes the underlying TCP connection type SSLPassthroughBackend struct { - Service *api.Service `json:"service"` + Service *api.Service `json:"service,omitEmpty"` Port intstr.IntOrString `json:"port"` // Backend describes the endpoints to use. 
Backend string `json:"namespace,omitempty"` @@ -313,7 +348,7 @@ type L4Service struct { // Backend of the service Backend L4Backend `json:"backend"` // Endpoints active endpoints of the service - Endpoints []Endpoint `json:"endpoins"` + Endpoints []Endpoint `json:"endpoins,omitEmpty"` } // L4Backend describes the kubernetes service behind L4 Ingress service @@ -322,4 +357,6 @@ type L4Backend struct { Name string `json:"name"` Namespace string `json:"namespace"` Protocol api.Protocol `json:"protocol"` + // +optional + UseProxyProtocol bool `json:"useProxyProtocol"` } diff --git a/core/pkg/ingress/types_equals.go b/core/pkg/ingress/types_equals.go new file mode 100644 index 000000000..3100da373 --- /dev/null +++ b/core/pkg/ingress/types_equals.go @@ -0,0 +1,485 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +// Equal tests for equality between two BackendInfo types +func (bi1 *BackendInfo) Equal(bi2 *BackendInfo) bool { + if bi1 == bi2 { + return true + } + if bi1 == nil || bi2 == nil { + return false + } + if bi1.Name != bi2.Name { + return false + } + if bi1.Release != bi2.Release { + return false + } + if bi1.Build != bi2.Build { + return false + } + if bi1.Repository != bi2.Repository { + return false + } + + return true +} + +// Equal tests for equality between two Configuration types +func (c1 *Configuration) Equal(c2 *Configuration) bool { + if c1 == c2 { + return true + } + if c1 == nil || c2 == nil { + return false + } + + if len(c1.Backends) != len(c2.Backends) { + return false + } + + for _, c1b := range c1.Backends { + found := false + for _, c2b := range c2.Backends { + if c1b.Equal(c2b) { + found = true + break + } + } + if !found { + return false + } + } + + if len(c1.Servers) != len(c2.Servers) { + return false + } + + for _, c1s := range c1.Servers { + found := false + for _, c2s := range c2.Servers { + if c1s.Equal(c2s) { + found = true + break + } + } + if !found { + return false + } + } + + if len(c1.TCPEndpoints) != len(c2.TCPEndpoints) { + return false + } + + for _, tcp1 := range c1.TCPEndpoints { + found := false + for _, tcp2 := range c2.TCPEndpoints { + if (&tcp1).Equal(&tcp2) { + found = true + break + } + } + if !found { + return false + } + } + + if len(c1.UDPEndpoints) != len(c2.UDPEndpoints) { + return false + } + + for _, udp1 := range c1.UDPEndpoints { + found := false + for _, udp2 := range c2.UDPEndpoints { + if (&udp1).Equal(&udp2) { + found = true + break + } + } + if !found { + return false + } + } + + if len(c1.PassthroughBackends) != len(c2.PassthroughBackends) { + return false + } + + for _, ptb1 := range c1.PassthroughBackends { + found := false + for _, ptb2 := range c2.PassthroughBackends { + if ptb1.Equal(ptb2) { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +// Equal tests for equality between two Backend types +func (b1 *Backend) Equal(b2 *Backend) bool { + if b1 == b2 { + return true + } + if b1 == nil || b2 == nil { + return false + } + if b1.Name != 
b2.Name { + return false + } + + if b1.Service == nil || b2.Service == nil { + return false + } + if b1.Service.GetNamespace() != b2.Service.GetNamespace() { + return false + } + if b1.Service.GetName() != b2.Service.GetName() { + return false + } + if b1.Service.GetResourceVersion() != b2.Service.GetResourceVersion() { + return false + } + + if b1.Port != b2.Port { + return false + } + if b1.Secure != b2.Secure { + return false + } + if !(&b1.SecureCACert).Equal(&b2.SecureCACert) { + return false + } + if b1.SSLPassthrough != b2.SSLPassthrough { + return false + } + if !(&b1.SessionAffinity).Equal(&b2.SessionAffinity) { + return false + } + + if len(b1.Endpoints) != len(b2.Endpoints) { + return false + } + + for _, udp1 := range b1.Endpoints { + found := false + for _, udp2 := range b2.Endpoints { + if (&udp1).Equal(&udp2) { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +// Equal tests for equality between two SessionAffinityConfig types +func (sac1 *SessionAffinityConfig) Equal(sac2 *SessionAffinityConfig) bool { + if sac1 == sac2 { + return true + } + if sac1 == nil || sac2 == nil { + return false + } + if sac1.AffinityType != sac2.AffinityType { + return false + } + if !(&sac1.CookieSessionAffinity).Equal(&sac2.CookieSessionAffinity) { + return false + } + + return true +} + +// Equal tests for equality between two CookieSessionAffinity types +func (csa1 *CookieSessionAffinity) Equal(csa2 *CookieSessionAffinity) bool { + if csa1 == csa2 { + return true + } + if csa1 == nil || csa2 == nil { + return false + } + if csa1.Name != csa2.Name { + return false + } + if csa1.Hash != csa2.Hash { + return false + } + + return true +} + +// Equal checks the equality against an Endpoint +func (e1 *Endpoint) Equal(e2 *Endpoint) bool { + if e1 == e2 { + return true + } + if e1 == nil || e2 == nil { + return false + } + if e1.Address != e2.Address { + return false + } + if e1.Port != e2.Port { + return false + } + if e1.MaxFails != e2.MaxFails { + return false + } + if e1.FailTimeout != e2.FailTimeout { + return false + } + + if e1.Target == nil || e2.Target == nil { + return false + } + if e1.Target.UID != e2.Target.UID { + return false + } + if e1.Target.ResourceVersion != e2.Target.ResourceVersion { + return false + } + + return true +} + +// Equal tests for equality between two Server types +func (s1 *Server) Equal(s2 *Server) bool { + if s1 == s2 { + return true + } + if s1 == nil || s2 == nil { + return false + } + if s1.Hostname != s2.Hostname { + return false + } + if s1.Alias != s2.Alias { + return false + } + if s1.SSLPassthrough != s2.SSLPassthrough { + return false + } + if s1.SSLCertificate != s2.SSLCertificate { + return false + } + if s1.SSLPemChecksum != s2.SSLPemChecksum { + return false + } + if !(&s1.CertificateAuth).Equal(&s2.CertificateAuth) { + return false + } + if s1.RedirectFromToWWW != s2.RedirectFromToWWW { + return false + } + + if len(s1.Locations) != len(s2.Locations) { + return false + } + + for _, s1l := range s1.Locations { + found := false + for _, sl2 := range s2.Locations { + if s1l.Equal(sl2) { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +// Equal tests for equality between two Location types +func (l1 *Location) Equal(l2 *Location) bool { + if l1 == l2 { + return true + } + if l1 == nil || l2 == nil { + return false + } + if l1.Path != l2.Path { + return false + } + if l1.IsDefBackend != l2.IsDefBackend { + return false + } + if l1.Backend != l2.Backend { + return false + } + + 
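+	// The Service comparison below uses namespace/name/resourceVersion as the
+	// identity of the referenced Service, so any update to that Service object
+	// makes the locations compare unequal; note that two locations whose
+	// Service fields are both nil also compare unequal here.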
if l1.Service == nil || l2.Service == nil { + return false + } + if l1.Service.GetNamespace() != l2.Service.GetNamespace() { + return false + } + if l1.Service.GetName() != l2.Service.GetName() { + return false + } + if l1.Service.GetResourceVersion() != l2.Service.GetResourceVersion() { + return false + } + + if l1.Port.StrVal != l2.Port.StrVal { + return false + } + if !(&l1.BasicDigestAuth).Equal(&l2.BasicDigestAuth) { + return false + } + if l1.Denied != l2.Denied { + return false + } + if l1.EnableCORS != l2.EnableCORS { + return false + } + if !(&l1.ExternalAuth).Equal(&l2.ExternalAuth) { + return false + } + if !(&l1.RateLimit).Equal(&l2.RateLimit) { + return false + } + if !(&l1.Redirect).Equal(&l2.Redirect) { + return false + } + if !(&l1.Rewrite).Equal(&l2.Rewrite) { + return false + } + if !(&l1.Whitelist).Equal(&l2.Whitelist) { + return false + } + if !(&l1.Proxy).Equal(&l2.Proxy) { + return false + } + if l1.UsePortInRedirects != l2.UsePortInRedirects { + return false + } + if l1.ConfigurationSnippet != l2.ConfigurationSnippet { + return false + } + if l1.ClientBodyBufferSize != l2.ClientBodyBufferSize { + return false + } + + return true +} + +// Equal tests for equality between two SSLPassthroughBackend types +func (ptb1 *SSLPassthroughBackend) Equal(ptb2 *SSLPassthroughBackend) bool { + if ptb1 == ptb2 { + return true + } + if ptb1 == nil || ptb2 == nil { + return false + } + if ptb1.Backend != ptb2.Backend { + return false + } + if ptb1.Hostname != ptb2.Hostname { + return false + } + if ptb1.Port != ptb2.Port { + return false + } + + if ptb1.Service == nil || ptb2.Service == nil { + return false + } + if ptb1.Service.GetNamespace() != ptb2.Service.GetNamespace() { + return false + } + if ptb1.Service.GetName() != ptb2.Service.GetName() { + return false + } + if ptb1.Service.GetResourceVersion() != ptb2.Service.GetResourceVersion() { + return false + } + + return true +} + +// Equal tests for equality between two L4Service types +func (e1 *L4Service) Equal(e2 *L4Service) bool { + if e1 == e2 { + return true + } + if e1 == nil || e2 == nil { + return false + } + if e1.Port != e2.Port { + return false + } + if !(&e1.Backend).Equal(&e2.Backend) { + return false + } + if len(e1.Endpoints) != len(e2.Endpoints) { + return false + } + + for _, ep1 := range e1.Endpoints { + found := false + for _, ep2 := range e2.Endpoints { + if (&ep1).Equal(&ep2) { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +// Equal tests for equality between two L4Backend types +func (l4b1 *L4Backend) Equal(l4b2 *L4Backend) bool { + if l4b1 == l4b2 { + return true + } + if l4b1 == nil || l4b2 == nil { + return false + } + if l4b1.Port != l4b2.Port { + return false + } + if l4b1.Name != l4b2.Name { + return false + } + if l4b1.Namespace != l4b2.Namespace { + return false + } + if l4b1.Protocol != l4b2.Protocol { + return false + } + + return true +} diff --git a/core/pkg/ingress/zz_generated.types.go b/core/pkg/ingress/zz_generated.types.go new file mode 100644 index 000000000..43152b22e --- /dev/null +++ b/core/pkg/ingress/zz_generated.types.go @@ -0,0 +1,116 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package ingress + +import ( + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy__Backend, InType: reflect.TypeOf(&Backend{})}, + {Fn: DeepCopy__CookieSessionAffinity, InType: reflect.TypeOf(&CookieSessionAffinity{})}, + {Fn: DeepCopy__Endpoint, InType: reflect.TypeOf(&Endpoint{})}, + {Fn: DeepCopy__SessionAffinityConfig, InType: reflect.TypeOf(&SessionAffinityConfig{})}, + } +} + +// DeepCopy__Backend is an autogenerated deepcopy function. +func DeepCopy__Backend(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Backend) + out := out.(*Backend) + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.Service) + } + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + if err := DeepCopy__Endpoint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy__SessionAffinityConfig(&in.SessionAffinity, &out.SessionAffinity, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy__CookieSessionAffinity is an autogenerated deepcopy function. +func DeepCopy__CookieSessionAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CookieSessionAffinity) + out := out.(*CookieSessionAffinity) + *out = *in + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make(map[string][]string) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]string) + } + } + } + return nil + } +} + +// DeepCopy__Endpoint is an autogenerated deepcopy function. +func DeepCopy__Endpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoint) + out := out.(*Endpoint) + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(v1.ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy__SessionAffinityConfig is an autogenerated deepcopy function. 
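+// It delegates the nested CookieSessionAffinity (including its Locations map)
+// to DeepCopy__CookieSessionAffinity, so the copy shares no state with the input.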
+func DeepCopy__SessionAffinityConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SessionAffinityConfig) + out := out.(*SessionAffinityConfig) + *out = *in + if err := DeepCopy__CookieSessionAffinity(&in.CookieSessionAffinity, &out.CookieSessionAffinity, c); err != nil { + return err + } + return nil + } +} diff --git a/core/pkg/k8s/main.go b/core/pkg/k8s/main.go index 44d0fb539..82a1ecdcc 100644 --- a/core/pkg/k8s/main.go +++ b/core/pkg/k8s/main.go @@ -21,9 +21,9 @@ import ( "os" "strings" + api "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" - api "k8s.io/client-go/pkg/api/v1" ) // IsValidService checks if exists a service with the specified name @@ -117,7 +117,7 @@ func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) { podName := os.Getenv("POD_NAME") podNs := os.Getenv("POD_NAMESPACE") - if podName == "" && podNs == "" { + if podName == "" || podNs == "" { return nil, fmt.Errorf("unable to get POD information (missing POD_NAME or POD_NAMESPACE environment variable") } diff --git a/core/pkg/k8s/main_test.go b/core/pkg/k8s/main_test.go index af6468dc7..820b1216e 100644 --- a/core/pkg/k8s/main_test.go +++ b/core/pkg/k8s/main_test.go @@ -20,9 +20,9 @@ import ( "os" "testing" + api "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" testclient "k8s.io/client-go/kubernetes/fake" - api "k8s.io/client-go/pkg/api/v1" ) func TestParseNameNS(t *testing.T) { @@ -297,14 +297,30 @@ func TestGetPodDetails(t *testing.T) { t.Errorf("expected an error but returned nil") } - // POD not exist - os.Setenv("POD_NAME", "testpod") + // POD_NAME not exist + os.Setenv("POD_NAME", "") os.Setenv("POD_NAMESPACE", api.NamespaceDefault) _, err2 := GetPodDetails(testclient.NewSimpleClientset()) if err2 == nil { t.Errorf("expected an error but returned nil") } + // POD_NAMESPACE not exist + os.Setenv("POD_NAME", "testpod") + os.Setenv("POD_NAMESPACE", "") + _, err3 := GetPodDetails(testclient.NewSimpleClientset()) + if err3 == nil { + t.Errorf("expected an error but returned nil") + } + + // POD not exist + os.Setenv("POD_NAME", "testpod") + os.Setenv("POD_NAMESPACE", api.NamespaceDefault) + _, err4 := GetPodDetails(testclient.NewSimpleClientset()) + if err4 == nil { + t.Errorf("expected an error but returned nil") + } + // success to get PodInfo fkClient := testclient.NewSimpleClientset( &api.PodList{Items: []api.Pod{{ @@ -331,8 +347,8 @@ func TestGetPodDetails(t *testing.T) { }, }}}) - epi, err3 := GetPodDetails(fkClient) - if err3 != nil { + epi, err5 := GetPodDetails(fkClient) + if err5 != nil { t.Errorf("expected a PodInfo but returned error") return } diff --git a/core/pkg/net/dns/dns.go b/core/pkg/net/dns/dns.go index d381904e5..1249f7910 100644 --- a/core/pkg/net/dns/dns.go +++ b/core/pkg/net/dns/dns.go @@ -38,11 +38,11 @@ func GetSystemNameServers() ([]net.IP, error) { lines := strings.Split(string(file), "\n") for l := range lines { trimmed := strings.TrimSpace(lines[l]) - if strings.HasPrefix(trimmed, "#") { + if len(trimmed) == 0 || trimmed[0] == '#' || trimmed[0] == ';' { continue } fields := strings.Fields(trimmed) - if len(fields) == 0 { + if len(fields) < 2 { continue } if fields[0] == "nameserver" { diff --git a/core/pkg/net/dns/dns_test.go b/core/pkg/net/dns/dns_test.go index 1b99da1d7..979d65c32 100644 --- a/core/pkg/net/dns/dns_test.go +++ b/core/pkg/net/dns/dns_test.go @@ -40,6 +40,8 @@ func TestGetDNSServers(t *testing.T) { defer os.Remove(file.Name()) 
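+	// The fixture below now mixes '#' and ';' comment styles; with the parser
+	// change above, GetSystemNameServers must skip those lines (and any line
+	// with fewer than two fields) and keep only the nameserver entries.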
ioutil.WriteFile(file.Name(), []byte(` + # comment + ; comment nameserver 2001:4860:4860::8844 nameserver 2001:4860:4860::8888 nameserver 8.8.8.8 diff --git a/core/pkg/net/ipnet.go b/core/pkg/net/ipnet.go new file mode 100644 index 000000000..b48d13db8 --- /dev/null +++ b/core/pkg/net/ipnet.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "net" + "strings" +) + +// IPNet maps string to net.IPNet. +type IPNet map[string]*net.IPNet + +// IP maps string to net.IP. +type IP map[string]net.IP + +// ParseIPNets parses string slice to IPNet. +func ParseIPNets(specs ...string) (IPNet, IP, error) { + ipnetset := make(IPNet) + ipset := make(IP) + + for _, spec := range specs { + spec = strings.TrimSpace(spec) + _, ipnet, err := net.ParseCIDR(spec) + if err != nil { + ip := net.ParseIP(spec) + if ip == nil { + return nil, nil, err + } + i := ip.String() + ipset[i] = ip + continue + } + + k := ipnet.String() + ipnetset[k] = ipnet + } + + return ipnetset, ipset, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go b/core/pkg/net/ipnet_test.go similarity index 64% rename from vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go rename to core/pkg/net/ipnet_test.go index 2ff5732d6..3ce1345c6 100644 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go +++ b/core/pkg/net/ipnet_test.go @@ -14,13 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package net import ( - "k8s.io/apimachinery/pkg/runtime" + "testing" ) -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() +func TestNewIPSet(t *testing.T) { + ipsets, ips, err := ParseIPNets("1.0.0.0", "2.0.0.0/8", "3.0.0.0/8") + if err != nil { + t.Errorf("error parsing IPNets: %v", err) + } + if len(ipsets) != 2 { + t.Errorf("Expected len=2: %d", len(ipsets)) + } + if len(ips) != 1 { + t.Errorf("Expected len=1: %d", len(ips)) + } } diff --git a/core/pkg/net/net.go b/core/pkg/net/net.go index cc4891aa5..d18ca49e8 100644 --- a/core/pkg/net/net.go +++ b/core/pkg/net/net.go @@ -16,15 +16,9 @@ limitations under the License. 
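The new ParseIPNets helper above splits a list of source-range specs into CIDR networks and single addresses. A short usage sketch with hypothetical inputs:

```go
package main

import (
	"fmt"

	"k8s.io/ingress/core/pkg/net"
)

func main() {
	// CIDR specs land in the IPNet map, bare addresses in the IP map; an
	// entry that parses as neither aborts the whole call with an error.
	nets, ips, err := net.ParseIPNets("10.0.0.0/8", "2001:db8::/32", "192.168.1.1")
	if err != nil {
		panic(err)
	}
	fmt.Printf("networks=%d singleIPs=%d\n", len(nets), len(ips)) // networks=2 singleIPs=1
}
```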
package net -import ( - _net "net" - "strings" -) +import _net "net" // IsIPV6 checks if the input contains a valid IPV6 address func IsIPV6(ip _net.IP) bool { - if dp := strings.Index(ip.String(), ":"); dp != -1 { - return true - } - return false + return ip.To4() == nil } diff --git a/core/pkg/net/ssl/ssl.go b/core/pkg/net/ssl/ssl.go index 4b3f08b4d..9cf0a984d 100644 --- a/core/pkg/net/ssl/ssl.go +++ b/core/pkg/net/ssl/ssl.go @@ -19,24 +19,31 @@ package ssl import ( "crypto/rand" "crypto/rsa" - "crypto/sha1" "crypto/tls" "crypto/x509" "crypto/x509/pkix" - "encoding/hex" + "encoding/asn1" "encoding/pem" "errors" "fmt" "io/ioutil" "math/big" + "net" "os" + "strconv" "time" "github.com/golang/glog" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress/core/pkg/file" "k8s.io/ingress/core/pkg/ingress" ) +var ( + oidExtensionSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17} +) + // AddOrUpdateCertAndKey creates a .pem file wth the cert and the key with the specified name func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, error) { pemName := fmt.Sprintf("%v.pem", name) @@ -97,9 +104,28 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, return nil, err } - cn := []string{pemCert.Subject.CommonName} - if len(pemCert.DNSNames) > 0 { - cn = append(cn, pemCert.DNSNames...) + cn := sets.NewString(pemCert.Subject.CommonName) + for _, dns := range pemCert.DNSNames { + if !cn.Has(dns) { + cn.Insert(dns) + } + } + + if len(pemCert.Extensions) > 0 { + glog.V(3).Info("parsing ssl certificate extensions") + for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) { + dns, _, _, err := parseSANExtension(ext.Value) + if err != nil { + glog.Warningf("unexpected error parsing certificate extensions: %v", err) + continue + } + + for _, dns := range dns { + if !cn.Has(dns) { + cn.Insert(dns) + } + } + } } err = os.Rename(tempPemFile.Name(), pemFileName) @@ -134,22 +160,90 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, caFile.Write([]byte("\n")) return &ingress.SSLCert{ + Certificate: pemCert, CAFileName: pemFileName, PemFileName: pemFileName, - PemSHA: PemSHA1(pemFileName), - CN: cn, + PemSHA: file.SHA1(pemFileName), + CN: cn.List(), ExpireTime: pemCert.NotAfter, }, nil } return &ingress.SSLCert{ + Certificate: pemCert, PemFileName: pemFileName, - PemSHA: PemSHA1(pemFileName), - CN: cn, + PemSHA: file.SHA1(pemFileName), + CN: cn.List(), ExpireTime: pemCert.NotAfter, }, nil } +func getExtension(c *x509.Certificate, id asn1.ObjectIdentifier) []pkix.Extension { + var exts []pkix.Extension + for _, ext := range c.Extensions { + if ext.Id.Equal(id) { + exts = append(exts, ext) + } + } + return exts +} + +func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddresses []net.IP, err error) { + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + var rest []byte + if rest, err = asn1.Unmarshal(value, &seq); err != nil { + return + } else if len(rest) != 0 { + err = errors.New("x509: trailing data after X.509 extension") + return + } + if !seq.IsCompound 
|| seq.Tag != 16 || seq.Class != 0 { + err = asn1.StructuralError{Msg: "bad SAN sequence"} + return + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return + } + switch v.Tag { + case 1: + emailAddresses = append(emailAddresses, string(v.Bytes)) + case 2: + dnsNames = append(dnsNames, string(v.Bytes)) + case 7: + switch len(v.Bytes) { + case net.IPv4len, net.IPv6len: + ipAddresses = append(ipAddresses, v.Bytes) + default: + err = errors.New("x509: certificate contained IP address of length " + strconv.Itoa(len(v.Bytes))) + return + } + } + } + + return +} + // AddCertAuth creates a .pem file with the specified CAs to be used in Cert Authentication // If it's already exists, it's clobbered. func AddCertAuth(name string, ca []byte) (*ingress.SSLCert, error) { @@ -180,7 +274,7 @@ func AddCertAuth(name string, ca []byte) (*ingress.SSLCert, error) { return &ingress.SSLCert{ CAFileName: caFileName, PemFileName: caFileName, - PemSHA: PemSHA1(caFileName), + PemSHA: file.SHA1(caFileName), }, nil } @@ -232,19 +326,6 @@ func AddOrUpdateDHParam(name string, dh []byte) (string, error) { return pemFileName, nil } -// PemSHA1 returns the SHA1 of a pem file. This is used to -// reload NGINX in case a secret with a SSL certificate changed. -func PemSHA1(filename string) string { - hasher := sha1.New() - s, err := ioutil.ReadFile(filename) - if err != nil { - return "" - } - - hasher.Write(s) - return hex.EncodeToString(hasher.Sum(nil)) -} - // GetFakeSSLCert creates a Self Signed Certificate // Based in the code https://golang.org/src/crypto/tls/generate_cert.go func GetFakeSSLCert() ([]byte, []byte) { diff --git a/core/pkg/net/ssl/ssl_test.go b/core/pkg/net/ssl/ssl_test.go index b4d3c5eff..564fc1d53 100644 --- a/core/pkg/net/ssl/ssl_test.go +++ b/core/pkg/net/ssl/ssl_test.go @@ -17,15 +17,44 @@ limitations under the License. 
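The certificate handling above now folds the subject CN, the parsed DNSNames, and any names recovered from the raw SAN extension into a string set. A condensed sketch of that de-duplication, assuming an already-parsed *x509.Certificate:

```go
package ssl

import (
	"crypto/x509"

	"k8s.io/apimachinery/pkg/util/sets"
)

// hostNames gathers every DNS name a certificate covers, de-duplicated.
// sets.String.Insert is idempotent, so the explicit Has() guard in the
// patch is a readability choice rather than a necessity.
func hostNames(cert *x509.Certificate) []string {
	cn := sets.NewString(cert.Subject.CommonName)
	for _, dns := range cert.DNSNames {
		cn.Insert(dns)
	}
	return cn.List() // sorted, unique
}
```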
package ssl import ( - "encoding/base64" + "crypto/x509" "fmt" "io/ioutil" "testing" "time" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/cert/triple" "k8s.io/ingress/core/pkg/ingress" ) +// generateRSACerts generates a self signed certificate using a self generated ca +func generateRSACerts(host string) (*triple.KeyPair, *triple.KeyPair, error) { + ca, err := triple.NewCA("self-sign-ca") + if err != nil { + return nil, nil, err + } + + key, err := certutil.NewPrivateKey() + if err != nil { + return nil, nil, fmt.Errorf("unable to create a server private key: %v", err) + } + + config := certutil.Config{ + CommonName: host, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + cert, err := certutil.NewSignedCert(config, key, ca.Cert, ca.Key) + if err != nil { + return nil, nil, fmt.Errorf("unable to sign the server certificate: %v", err) + } + + return &triple.KeyPair{ + Key: key, + Cert: cert, + }, ca, nil +} + func TestAddOrUpdateCertAndKey(t *testing.T) { td, err := ioutil.TempDir("", "ssl") if err != nil { @@ -33,23 +62,17 @@ func TestAddOrUpdateCertAndKey(t *testing.T) { } ingress.DefaultSSLDirectory = td - // openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=echoheaders/O=echoheaders" - tlsCrt := "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhakNDQWxLZ0F3SUJBZ0lKQUxHUXR5VVBKTFhYTUEwR0NTcUdTSWIzRFFFQkJRVUFNQ3d4RkRBU0JnTlYKQkFNVEMyVmphRzlvWldGa1pYSnpNUlF3RWdZRFZRUUtFd3RsWTJodmFHVmhaR1Z5Y3pBZUZ3MHhOakF6TXpFeQpNekU1TkRoYUZ3MHhOekF6TXpFeU16RTVORGhhTUN3eEZEQVNCZ05WQkFNVEMyVmphRzlvWldGa1pYSnpNUlF3CkVnWURWUVFLRXd0bFkyaHZhR1ZoWkdWeWN6Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0MKZ2dFQkFONzVmS0N5RWwxanFpMjUxTlNabDYzeGQweG5HMHZTVjdYL0xxTHJveVNraW5nbnI0NDZZWlE4UEJWOAo5TUZzdW5RRGt1QVoyZzA3NHM1YWhLSm9BRGJOMzhld053RXNsVDJkRzhRTUw0TktrTUNxL1hWbzRQMDFlWG1PCmkxR2txZFA1ZUExUHlPZCtHM3gzZmxPN2xOdmtJdHVHYXFyc0tvMEhtMHhqTDVtRUpwWUlOa0tGSVhsWWVLZS8KeHRDR25CU2tLVHFMTG0yeExKSGFFcnJpaDZRdkx4NXF5U2gzZTU2QVpEcTlkTERvcWdmVHV3Z2IzekhQekc2NwppZ0E0dkYrc2FRNHpZUE1NMHQyU1NiVkx1M2pScWNvL3lxZysrOVJBTTV4bjRubnorL0hUWFhHKzZ0RDBaeGI1CmVVRDNQakVhTnlXaUV2dTN6UFJmdysyNURMY0NBd0VBQWFPQmpqQ0JpekFkQmdOVkhRNEVGZ1FVcktMZFhHeUUKNUlEOGRvd2lZNkdzK3dNMHFKc3dYQVlEVlIwakJGVXdVNEFVcktMZFhHeUU1SUQ4ZG93aVk2R3Mrd00wcUp1aApNS1F1TUN3eEZEQVNCZ05WQkFNVEMyVmphRzlvWldGa1pYSnpNUlF3RWdZRFZRUUtFd3RsWTJodmFHVmhaR1Z5CmM0SUpBTEdRdHlVUEpMWFhNQXdHQTFVZEV3UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUZCUUFEZ2dFQkFNZVMKMHFia3VZa3Z1enlSWmtBeE1PdUFaSDJCK0Evb3N4ODhFRHB1ckV0ZWN5RXVxdnRvMmpCSVdCZ2RkR3VBYU5jVQorUUZDRm9NakJOUDVWVUxIWVhTQ3VaczN2Y25WRDU4N3NHNlBaLzhzbXJuYUhTUjg1ZVpZVS80bmFyNUErdWErClIvMHJrSkZnOTlQSmNJd3JmcWlYOHdRcWdJVVlLNE9nWEJZcUJRL0VZS2YvdXl6UFN3UVZYRnVJTTZTeDBXcTYKTUNML3d2RlhLS0FaWDBqb3J4cHRjcldkUXNCcmYzWVRnYmx4TE1sN20zL2VuR1drcEhDUHdYeVRCOC9rRkw3SApLL2ZHTU1NWGswUkVSbGFPM1hTSUhrZUQ2SXJiRnRNV3R1RlJwZms2ZFA2TXlMOHRmTmZ6a3VvUHVEWUFaWllWCnR1NnZ0c0FRS0xWb0pGaGV0b1k9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" - tlsKey := 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBM3ZsOG9MSVNYV09xTGJuVTFKbVhyZkYzVEdjYlM5Slh0Zjh1b3V1akpLU0tlQ2V2CmpqcGhsRHc4Rlh6MHdXeTZkQU9TNEJuYURUdml6bHFFb21nQU5zM2Z4N0EzQVN5VlBaMGJ4QXd2ZzBxUXdLcjkKZFdqZy9UVjVlWTZMVWFTcDAvbDREVS9JNTM0YmZIZCtVN3VVMitRaTI0WnFxdXdxalFlYlRHTXZtWVFtbGdnMgpRb1VoZVZoNHA3L0cwSWFjRktRcE9vc3ViYkVza2RvU3V1S0hwQzh2SG1ySktIZDdub0JrT3IxMHNPaXFCOU83CkNCdmZNYy9NYnJ1S0FEaThYNnhwRGpOZzh3elMzWkpKdFV1N2VOR3B5ai9LcUQ3NzFFQXpuR2ZpZWZQNzhkTmQKY2I3cTBQUm5Gdmw1UVBjK01SbzNKYUlTKzdmTTlGL0Q3YmtNdHdJREFRQUJBb0lCQUViNmFEL0hMNjFtMG45bgp6bVkyMWwvYW83MUFmU0h2dlZnRCtWYUhhQkY4QjFBa1lmQUdpWlZrYjBQdjJRSFJtTERoaWxtb0lROWhadHVGCldQOVIxKythTFlnbGdmenZzanBBenR2amZTUndFaEFpM2pnSHdNY1p4S2Q3UnNJZ2hxY2huS093S0NYNHNNczQKUnBCbEFBZlhZWGs4R3F4NkxUbGptSDRDZk42QzZHM1EwTTlLMUxBN2lsck1Na3hwcngxMnBlVTNkczZMVmNpOQptOFdBL21YZ2I0c3pEbVNaWVpYRmNZMEhYNTgyS3JKRHpQWEVJdGQwZk5wd3I0eFIybzdzMEwvK2RnZCtqWERjCkh2SDBKZ3NqODJJaTIxWGZGM2tST3FxR3BKNmhVcncxTUZzVWRyZ29GL3pFck0vNWZKMDdVNEhodGFlalVzWTIKMFJuNXdpRUNnWUVBKzVUTVRiV084Wkg5K2pIdVQwc0NhZFBYcW50WTZYdTZmYU04Tm5CZWNoeTFoWGdlQVN5agpSWERlZGFWM1c0SjU5eWxIQ3FoOVdseVh4cDVTWWtyQU41RnQ3elFGYi91YmorUFIyWWhMTWZpYlBSYlYvZW1MCm5YaGF6MmtlNUUxT1JLY0x6QUVwSmpuZGQwZlZMZjdmQzFHeStnS2YyK3hTY1hjMHJqRE5iNGtDZ1lFQTR1UVEKQk91TlJQS3FKcDZUZS9zUzZrZitHbEpjQSs3RmVOMVlxM0E2WEVZVm9ydXhnZXQ4a2E2ZEo1QjZDOWtITGtNcQpwdnFwMzkxeTN3YW5uWC9ONC9KQlU2M2RxZEcyd1BWRUQ0REduaE54Qm1oaWZpQ1I0R0c2ZnE4MUV6ZE1vcTZ4CklTNHA2RVJaQnZkb1RqNk9pTHl6aUJMckpxeUhIMWR6c0hGRlNqOENnWUVBOWlSSEgyQ2JVazU4SnVYak8wRXcKUTBvNG4xdS9TZkQ4TFNBZ01VTVBwS1hpRTR2S0Qyd1U4a1BUNDFiWXlIZUh6UUpkdDFmU0RTNjZjR0ZHU1ZUSgphNVNsOG5yN051ejg3bkwvUmMzTGhFQ3Y0YjBOOFRjbW1oSy9CbDdiRXBOd0dFczNoNGs3TVdNOEF4QU15c3VxCmZmQ1pJM0tkNVJYNk0zbGwyV2QyRjhFQ2dZQlQ5RU9oTG0vVmhWMUVjUVR0cVZlMGJQTXZWaTVLSGozZm5UZkUKS0FEUVIvYVZncElLR3RLN0xUdGxlbVpPbi8yeU5wUS91UnpHZ3pDUUtldzNzU1RFSmMzYVlzbFVudzdhazJhZAp2ZTdBYXowMU84YkdHTk1oamNmdVBIS05LN2Nsc3pKRHJzcys4SnRvb245c0JHWEZYdDJuaWlpTTVPWVN5TTg4CkNJMjFEUUtCZ0hEQVRZbE84UWlDVWFBQlVqOFBsb1BtMDhwa3cyc1VmQW0xMzJCY00wQk9BN1hqYjhtNm1ManQKOUlteU5kZ2ZiM080UjlKVUxTb1pZSTc1dUxIL3k2SDhQOVlpWHZOdzMrTXl6VFU2b2d1YU8xSTNya2pna29NeAo5cU5pYlJFeGswS1A5MVZkckVLSEdHZEFwT05ES1N4VzF3ektvbUxHdmtYSTVKV05KRXFkCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==" - - dCrt, err := base64.StdEncoding.DecodeString(tlsCrt) + cert, _, err := generateRSACerts("echoheaders") if err != nil { - t.Fatalf("Unexpected error: %+v", err) - return - } - - dKey, err := base64.StdEncoding.DecodeString(tlsKey) - if err != nil { - t.Fatalf("Unexpected error: %+v", err) + t.Fatalf("unexpected error creating SSL certificate: %v", err) } name := fmt.Sprintf("test-%v", time.Now().UnixNano()) - ngxCert, err := AddOrUpdateCertAndKey(name, dCrt, dKey, []byte{}) + + c := certutil.EncodeCertPEM(cert.Cert) + k := certutil.EncodePrivateKeyPEM(cert.Key) + + ngxCert, err := AddOrUpdateCertAndKey(name, c, k, []byte{}) if err != nil { t.Fatalf("unexpected error checking SSL certificate: %v", err) } @@ -66,3 +89,62 @@ func TestAddOrUpdateCertAndKey(t *testing.T) { t.Fatalf("expected cname echoheaders but %v returned", ngxCert.CN[0]) } } + +func TestCACert(t *testing.T) { + td, err := ioutil.TempDir("", "ssl") + if err != nil { + t.Fatalf("Unexpected error creating temporal directory: %v", err) + } + ingress.DefaultSSLDirectory = td + + cert, CA, err := generateRSACerts("echoheaders") + if err != nil { + t.Fatalf("unexpected error creating SSL certificate: %v", err) + } + + name := fmt.Sprintf("test-%v", time.Now().UnixNano()) + + c := certutil.EncodeCertPEM(cert.Cert) + k := 
certutil.EncodePrivateKeyPEM(cert.Key) + ca := certutil.EncodeCertPEM(CA.Cert) + + ngxCert, err := AddOrUpdateCertAndKey(name, c, k, ca) + if err != nil { + t.Fatalf("unexpected error checking SSL certificate: %v", err) + } + if ngxCert.CAFileName == "" { + t.Fatalf("expected a valid CA file name") + } +} + +func TestGetFakeSSLCert(t *testing.T) { + k, c := GetFakeSSLCert() + if len(k) == 0 { + t.Fatalf("expected a valid key") + } + if len(c) == 0 { + t.Fatalf("expected a valid certificate") + } +} + +func TestAddCertAuth(t *testing.T) { + td, err := ioutil.TempDir("", "ssl") + if err != nil { + t.Fatalf("Unexpected error creating temporal directory: %v", err) + } + ingress.DefaultSSLDirectory = td + + cn := "demo-ca" + _, ca, err := generateRSACerts(cn) + if err != nil { + t.Fatalf("unexpected error creating SSL certificate: %v", err) + } + c := certutil.EncodeCertPEM(ca.Cert) + ic, err := AddCertAuth(cn, c) + if err != nil { + t.Fatalf("unexpected error creating SSL certificate: %v", err) + } + if ic.CAFileName == "" { + t.Fatalf("expected a valid CA file name") + } +} diff --git a/core/pkg/task/queue.go b/core/pkg/task/queue.go index a25d0e179..34913573e 100644 --- a/core/pkg/task/queue.go +++ b/core/pkg/task/queue.go @@ -39,7 +39,7 @@ type Queue struct { // sync is called for each item in the queue sync func(interface{}) error // workerDone is closed when the worker exits - workerDone chan struct{} + workerDone chan bool fn func(obj interface{}) (interface{}, error) } @@ -79,7 +79,9 @@ func (t *Queue) worker() { for { key, quit := t.queue.Get() if quit { - close(t.workerDone) + if !isClosed(t.workerDone) { + close(t.workerDone) + } return } @@ -95,6 +97,16 @@ func (t *Queue) worker() { } } +func isClosed(ch <-chan bool) bool { + select { + case <-ch: + return true + default: + } + + return false +} + // Shutdown shuts down the work queue and waits for the worker to ACK func (t *Queue) Shutdown() { t.queue.ShutDown() @@ -117,7 +129,7 @@ func NewCustomTaskQueue(syncFn func(interface{}) error, fn func(interface{}) (in q := &Queue{ queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), sync: syncFn, - workerDone: make(chan struct{}), + workerDone: make(chan bool), fn: fn, } diff --git a/core/pkg/watch/file_watcher.go b/core/pkg/watch/file_watcher.go index fe85c4cd0..0fea1a143 100644 --- a/core/pkg/watch/file_watcher.go +++ b/core/pkg/watch/file_watcher.go @@ -19,6 +19,7 @@ package watch import ( "log" "path" + "strings" "gopkg.in/fsnotify.v1" ) @@ -42,7 +43,7 @@ func NewFileWatcher(file string, onEvent func()) (FileWatcher, error) { } // Close ends the watch -func (f FileWatcher) Close() error { +func (f *FileWatcher) Close() error { return f.watcher.Close() } @@ -59,9 +60,9 @@ func (f *FileWatcher) watch() error { for { select { case event := <-watcher.Events: - if event.Op&fsnotify.Write == fsnotify.Write || - event.Op&fsnotify.Create == fsnotify.Create && - event.Name == file { + if (event.Op&fsnotify.Write == fsnotify.Write || + event.Op&fsnotify.Create == fsnotify.Create) && + strings.HasSuffix(event.Name, file) { f.onEvent() } case err := <-watcher.Errors: diff --git a/core/pkg/watch/file_watcher_test.go b/core/pkg/watch/file_watcher_test.go index 57b5c2c1c..5733cd07c 100644 --- a/core/pkg/watch/file_watcher_test.go +++ b/core/pkg/watch/file_watcher_test.go @@ -26,7 +26,7 @@ import ( func prepareTimeout() chan bool { timeoutChan := make(chan bool, 1) go func() { - time.Sleep(1 * time.Second) + time.Sleep(500 * time.Millisecond) timeoutChan <- true }() return 
timeoutChan diff --git a/docs/README.md b/docs/README.md index 721336b0b..f630209e2 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,4 +1,4 @@ -# Ingress documentation and examples +# Ingress Documentation and Examples This directory contains documentation. @@ -6,7 +6,7 @@ This directory contains documentation. Try to create a README file in every directory containing documentation and index out from there, that's what readers will notice first. Use lower case for other -file names unless you have a reason to draw someones attention to it. +file names unless you have a reason to draw someone's attention to it. Avoid CamelCase. Rationale: diff --git a/docs/admin.md b/docs/admin.md index a52b8df1e..c40247bd9 100644 --- a/docs/admin.md +++ b/docs/admin.md @@ -1,4 +1,4 @@ -# Ingress admin guide +# Ingress Admin Guide This is a guide to the different deployment styles of an Ingress controller. @@ -8,7 +8,7 @@ __GCP__: On GCE/GKE, the Ingress controller runs on the master. If you wish to stop this controller and run another instance on your nodes instead, you can do so by following this [example](/examples/deployment/gce). -__generic__: You can deploy a genric (nginx or haproxy) Ingress controller by simply +__Generic__: You can deploy a generic (nginx or haproxy) Ingress controller by simply running it as a pod in your cluster, as shown in the [examples](/examples/deployment). Please note that you must specify the `ingress.class` [annotation](/examples/PREREQUISITES.md#ingress-class) if you're running on a @@ -20,7 +20,7 @@ Ingress controller behind an ELB on AWS, as shows in the [next section](#stacked ## Stacked deployments -__Behind a LoadBalancer Service__: You can deploy an generic controller behind a +__Behind a LoadBalancer Service__: You can deploy a generic controller behind a Service of `Type=LoadBalancer`, by following this [example](/examples/static-ip/nginx#acquiring-an-ip). More specifically, first create a LoadBalancer Service that selects the generic controller pods, then start the generic controller with the `--publish-service` @@ -37,7 +37,7 @@ TODO: Write an example ## Daemonset -Neither a single pod or bank of generic controllers scales with the cluster size. +Neither a single pod nor bank of generic controllers scale with the cluster size. If you create a daemonset of generic Ingress controllers, every new node automatically gets an instance of the controller listening on the specified ports. diff --git a/docs/annotations.md b/docs/annotations.md index 8ee25cecd..b23f7ecee 100644 --- a/docs/annotations.md +++ b/docs/annotations.md @@ -1,73 +1,82 @@ # Ingress Annotations -This file defines a list of annotations which are supported by various Ingress controllers (both those based on the common ingress code, and alternative implementations). The intention is to ensure the maximum amount of compatibility between different implementations. +This file defines a list of annotations which are supported by various Ingress controllers (both those based on the common ingress code, and alternative implementations). +The intention is to ensure the maximum amount of compatibility between different implementations. -All annotations are assumed to be prefixed with `ingress.kubernetes.io/` except where otherwise specified. There is no attempt to record implementation-specific annotations using other prefixes. (Traefik in particular defines several of its own annotations which are not described here, and does not seem to support any of the standard annotations.) 
+All annotations are assumed to be prefixed with `ingress.kubernetes.io/` except where otherwise specified. +There is no attempt to record implementation-specific annotations using other prefixes. +(Traefik in particular defines several of its own annotations which are not described here, and does not seem to support any of the standard annotations.) Key: * `nginx`: the `kubernetes/ingress` nginx controller * `gce`: the `kubernetes/ingress` GCE controller -* `traefik`: Traefik's built-in Ingress controller +* `traefik`: Traefik's built-in Ingress controller * `haproxy`: Joao Morais' [HAProxy Ingress controller](https://github.com/jcmoraisjr/haproxy-ingress) * `trafficserver`: Torchbox's [Apache Traffic Server controller plugin](https://github.com/torchbox/k8s-ts-ingress) ## TLS-related -| Name | Meaning -| --- | --- -| `ssl-passthrough` | Pass TLS connections directly to backend; do not offload. Default `false`. (nginx) -| `ssl-redirect` | Redirect non-TLS requests to TLS when TLS is enabled. Default `true`. (nginx, haproxy, trafficserver) -| `force-ssl-redirect` | Redirect non-TLS requests to TLS even when TLS is not configured. Default `false`. (nginx, trafficserver). -| `secure-backends` | Use TLS to communicate with origin (pods). Default `false`. (nginx, trafficserver) -| `kubernetes.io/ingress.allow-http` | Whether to accept non-TLS HTTP connections. (gce) -| `hsts-max-age` | Set an HSTS header with this lifetime. (trafficserver) -| `hsts-include-subdomains` | Add includeSubdomains to the HSTS header. (trafficserver) +| Name | Meaning | Default | Controller +| --- | --- | --- | --- | +| `ssl-passthrough` | Pass TLS connections directly to backend; do not offload. | `false` | nginx, haproxy +| `ssl-redirect` | Redirect non-TLS requests to TLS when TLS is enabled. | `true` | nginx, haproxy, trafficserver +| `force-ssl-redirect` | Redirect non-TLS requests to TLS even when TLS is not configured. | `false` | nginx, trafficserver +| `secure-backends` | Use TLS to communicate with origin (pods). | `false` | nginx, haproxy, trafficserver +| `kubernetes.io/ingress.allow-http` | Whether to accept non-TLS HTTP connections. | `true` | gce +| `pre-shared-cert` | Name of the TLS certificate in GCP to use when provisioning the HTTPS load balancer. | empty string | gce +| `hsts-max-age` | Set an HSTS header with this lifetime. | | trafficserver +| `hsts-include-subdomains` | Add includeSubdomains to the HSTS header. | | trafficserver ## Authentication related -| Name | Meaning -| --- | --- -| `auth-type` | Authentication type: `basic`, `digest`, ... (nginx, haproxy, trafficserver) -| `auth-secret` | Secret name for authentication. (nginx, haproxy, trafficserver) -| `auth-realm` | Authentication realm. (nginx, haproxy, trafficserver) -| `auth-tls-secret` | Name of secret for TLS client certification validation. (nginx, haproxy) -| `auth-tls-verify-depth` | Maximum chain length of TLS client certificate. (nginx) -| `auth-satisfy` | Behaviour when more than one of `auth-type`, `auth-tls-secret` or `whitelist-source-range` are configured: `all` (default) or `any`. (trafficserver) | `trafficserver` -| `whitelist-source-range` | Comma-separate list of IP addresses to restrict access to. (nginx, haproxy, trafficserver) +| Name | Meaning | Default | Controller +| --- | --- | --- | --- | +| `auth-type` | Authentication type: `basic`, `digest`, ... | | nginx, haproxy, trafficserver +| `auth-secret` | Secret name for authentication. | | nginx, haproxy, trafficserver +| `auth-realm` | Authentication realm. 
| | nginx, haproxy, trafficserver +| `auth-tls-secret` | Name of secret for TLS client certification validation. | | nginx, haproxy +| `auth-tls-verify-depth` | Maximum chain length of TLS client certificate. | | nginx +| `auth-tls-error-page` | The page that user should be redirected in case of Auth error | | string +| `auth-satisfy` | Behaviour when more than one of `auth-type`, `auth-tls-secret` or `whitelist-source-range` are configured: `all` or `any`. | `all` | trafficserver | `trafficserver` +| `whitelist-source-range` | Comma-separate list of IP addresses to enable access to. | | nginx, haproxy, trafficserver ## URL related -| Name | Meaning -| --- | --- -| `app-root` | Redirect requests without a path (i.e., for `/`) to this location. (nginx, trafficserver) -| `rewrite-target` | Replace matched Ingress `path` with this value. (nginx, trafficserver) -| `add-base-url` | Add `` tag to HTML. (nginx) -| `preserve-host` | Whether to pass the client request host (`true`) or the origin hostname (`false`) in the HTTP Host field. (trafficserver) +| Name | Meaning | Default | Controller +| --- | --- | --- | --- | +| `app-root` | Redirect requests without a path (i.e., for `/`) to this location. | | nginx, haproxy, trafficserver +| `rewrite-target` | Replace matched Ingress `path` with this value. | | nginx, trafficserver +| `add-base-url` | Add `` tag to HTML. | | nginx +| `base-url-scheme` | Specify the scheme of the `` tags. | | nginx +| `preserve-host` | Whether to pass the client request host (`true`) or the origin hostname (`false`) in the HTTP Host field. | | trafficserver ## Miscellaneous -| Name | Meaning -| --- | --- -| `configuration-snippet` | Arbitrary text to put in the generated configuration file. (nginx) -| `enable-cors` | Enable CORS headers in response. (nginx) -| `limit-connections` | Limit concurrent connections per IP address[1]. (nginx) -| `limit-rps` | Limit requests per second per IP address[1]. (nginx) -| `affinity` | Specify a method to stick clients to origins across requests. Found in `nginx`, where the only supported value is `cookie`. (nginx) -| `session-cookie-name` | When `affinity` is set to `cookie`, the name of the cookie to use. (nginx) -| `session-cookie-hash` | When `affinity` is set to `cookie`, the hash algorithm used: `md5`, `sha`, `index`. (nginx) -| `proxy-body-size` | Maximum request body size. (nginx) -| `follow-redirects` | Follow HTTP redirects in the response and deliver the redirect target to the client. (trafficserver) +| Name | Meaning | Default | Controller +| --- | --- | --- | --- | +| `configuration-snippet` | Arbitrary text to put in the generated configuration file. | | nginx +| `enable-cors` | Enable CORS headers in response. | | nginx +| `limit-connections` | Limit concurrent connections per IP address[1]. | | nginx +| `limit-rps` | Limit requests per second per IP address[1]. | | nginx +| `limit-rpm` | Limit requests per minute per IP address. | | nginx +| `affinity` | Specify a method to stick clients to origins across requests. Found in `nginx`, where the only supported value is `cookie`. | | nginx +| `session-cookie-name` | When `affinity` is set to `cookie`, the name of the cookie to use. | | nginx +| `session-cookie-hash` | When `affinity` is set to `cookie`, the hash algorithm used: `md5`, `sha`, `index`. | | nginx +| `proxy-body-size` | Maximum request body size. | | nginx, haproxy +| `proxy-pass-params` | Parameters for proxy-pass directives. 
+| `follow-redirects` | Follow HTTP redirects in the response and deliver the redirect target to the client. | | trafficserver
+| `kubernetes.io/ingress.global-static-ip-name` | Name of the static global IP address in GCP to use when provisioning the HTTPS load balancer. | empty string | gce

[1] The documentation for the `nginx` controller says that only one of `limit-connections` or `limit-rps` may be specified; it's not clear why this is.

## Caching

-| Name | Meaning
-| --- | ---
-| `cache-enable` | Cache responses according to Expires or Cache-Control headers (trafficserver)
-| `cache-generation` | An arbitrary numeric value included in the cache key; changing this effectively clears the cache for this ingress. (trafficserver)
-| `cache-ignore-query-params` | Space-separate list of globs matching URL parameters to ignore when doing cache lookups. (trafficserver)
-| `cache-whitelist-query-params` | Ignore any URL parameters not in this whitespace-separate list of globs. (trafficserver)
-| `cache-sort-query-params` | Lexically sort the query parameters by name before cache lookup. (trafficserver)
-| `cache-ignore-cookies` | Requests containing a `Cookie:` header will not use the cache unless all the cookie names match this whitespace-separate list of globs. (trafficserver)
+| Name | Meaning | Default | Controller
+| --- | --- | --- | --- |
+| `cache-enable` | Cache responses according to Expires or Cache-Control headers. | | trafficserver
+| `cache-generation` | An arbitrary numeric value included in the cache key; changing this effectively clears the cache for this ingress. | | trafficserver
+| `cache-ignore-query-params` | Space-separated list of globs matching URL parameters to ignore when doing cache lookups. | | trafficserver
+| `cache-whitelist-query-params` | Ignore any URL parameters not in this whitespace-separated list of globs. | | trafficserver
+| `cache-sort-query-params` | Lexically sort the query parameters by name before cache lookup. | | trafficserver
+| `cache-ignore-cookies` | Requests containing a `Cookie:` header will not use the cache unless all the cookie names match this whitespace-separated list of globs. | | trafficserver
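To make the tables above concrete, here is a sketch of how such annotations are attached to an Ingress (the host, backend service, and chosen values are hypothetical, and the `ingress.kubernetes.io/` prefix is assumed per the note at the top):

```console
$ echo "
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: annotated-example
  annotations:
    # redirect plain HTTP to TLS; per the table, supported by nginx, haproxy and trafficserver
    ingress.kubernetes.io/ssl-redirect: \"true\"
    # only this source range may reach the backend
    ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/24
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /
        backend:
          serviceName: echoheaders
          servicePort: 80
" | kubectl create -f -
```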
diff --git a/docs/catalog.md b/docs/catalog.md index 7d83f6518..759322a31 100644
--- a/docs/catalog.md
+++ b/docs/catalog.md
@@ -1,4 +1,4 @@
-# Ingress controller catalog
+# Ingress Controller Catalog

This is a non-comprehensive list of existing ingress controllers.

diff --git a/docs/dev/README.md b/docs/dev/README.md index a8fa4c512..968ffc3da 100644
--- a/docs/dev/README.md
+++ b/docs/dev/README.md
@@ -1,12 +1,18 @@
-# Ingress development guide
+# Ingress Development Guide

-This directory is intended to be the canonical source of truth for things like writing and hacking on Ingress controllers. If you find a requirement that this doc does not capture, please submit an issue on github. If you find other docs with references to requirements that are not simply links to this doc, please submit an issue.
+This directory is intended to be the canonical source of truth for things like
+writing and hacking on Ingress controllers. If you find a requirement that this
+doc does not capture, please submit an issue on github. If you find other docs
+with references to requirements that are not simply links to this doc, please
+submit an issue.

-This document is intended to be relative to the branch in which it is found. It is guaranteed that requirements will change over time for the development branch, but release branches of Kubernetes should not change.
+This document is intended to be relative to the branch in which it is found.
+It is guaranteed that requirements will change over time for the development
+branch, but release branches of Kubernetes should not change.

## Navigation

-* [Build, test or release](releases.md) an existing controller
-* [Setup a cluster](setup.md) to hack at an existing controller
-* [Write your own](devel.md) controller
+* [Build, test, release](getting-started.md) an existing controller
+* [Set up a cluster](setup-cluster.md) to hack at an existing controller
+* [Write your own](custom-controller.md) controller
diff --git a/docs/dev/devel.md b/docs/dev/custom-controller.md similarity index 100% rename from docs/dev/devel.md rename to docs/dev/custom-controller.md
diff --git a/docs/dev/getting-started.md b/docs/dev/getting-started.md new file mode 100644 index 000000000..6c01d1170
--- /dev/null
+++ b/docs/dev/getting-started.md
@@ -0,0 +1,141 @@
+# Getting Started
+
+This document explains how to get started with developing for Kubernetes Ingress.
+It includes how to build, test, and release ingress controllers.
+
+## Dependencies
+
+The build uses dependencies in the `ingress/vendor` directory, which
+must be installed before building a binary/image. Occasionally, you
+might need to update the dependencies.
+
+This guide requires you to install the [godep](https://github.com/tools/godep) dependency
+tool.
+
+Check the version of `godep` you are using and make sure it is up to date.
+```console
+$ godep version
+godep v74 (linux/amd64/go1.6.1)
+```
+
+If you have an older version of `godep`, you can update it as follows:
+```console
+$ cd $GOPATH/src/k8s.io/ingress
+$ go get github.com/tools/godep
+$ cd $GOPATH/src/github.com/tools/godep
+$ go build -o godep *.go
+```
+
+This will automatically save the dependencies to the `vendor/` directory.
+```console
+$ cd $GOPATH/src/k8s.io/ingress
+$ godep save ./...
+```
+
+In general, you can follow [this guide](https://github.com/kubernetes/community/blob/master/contributors/devel/godep.md#using-godep-to-manage-dependencies) to update dependencies.
+To update a particular dependency, eg: Kubernetes:
+```console
+$ cd $GOPATH/src/k8s.io/ingress
+$ godep restore
+$ go get -u k8s.io/kubernetes
+$ cd $GOPATH/src/k8s.io/kubernetes
+$ godep restore
+$ cd $GOPATH/src/k8s.io/ingress
+$ rm -rf Godeps
+$ godep save ./...
+$ git [add/remove] as needed
+$ git commit
+```
+
+## Building
+
+All ingress controllers are built through a Makefile. Depending on your
+requirements you can build a raw server binary, a local container image,
+or push an image to a remote repository.
+
+In order to use your local Docker, you may need to set the following environment variables:
+```console
+# "gcloud docker" (default) or "docker"
+$ export DOCKER=
+
+# "gcr.io/google_containers" (default), "index.docker.io", or your own registry
+$ export REGISTRY=
+```
+To find the registry simply run: `docker system info | grep Registry`
+
+### Nginx Controller
+
+Build a raw server binary
+```console
+$ make controllers
+```
+
+[TODO](https://github.com/kubernetes/ingress/issues/387): add more specific instructions needed for raw server binary.
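Putting the environment variables and the make targets together, an end-to-end local session might look like the following sketch (the `DOCKER`, `REGISTRY`, and `TAG` values here are hypothetical placeholders; the individual image targets are described next):

```console
$ export DOCKER=docker             # use the plain docker CLI instead of "gcloud docker"
$ export REGISTRY=index.docker.io  # push somewhere other than gcr.io/google_containers
$ make controllers                 # raw server binaries
$ make docker-build TAG=0.1 PREFIX=$USER/ingress-controller
$ make docker-push TAG=0.1 PREFIX=$USER/ingress-controller
```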
+
+Build a local container image
+```console
+$ make docker-build TAG=<tag> PREFIX=$USER/ingress-controller
+```
+
+Push the container image to a remote repository
+```console
+$ make docker-push TAG=<tag> PREFIX=$USER/ingress-controller
+```
+
+### GCE Controller
+
+[TODO](https://github.com/kubernetes/ingress/issues/387): add instructions on building gce controller.
+
+## Deploying
+
+There are several ways to deploy the ingress controller onto a cluster. If you don't have a cluster, start by
+creating one [here](setup-cluster.md).
+
+* [nginx controller](../../examples/deployment/nginx/README.md)
+* [gce controller](../../examples/deployment/gce/README.md)
+
+## Testing
+
+To run unit-tests, enter each directory in `controllers/`
+```console
+$ cd $GOPATH/src/k8s.io/ingress/controllers/
+$ go test ./...
+```
+
+If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo.
+```console
+$ cd $GOPATH/src/k8s.io/kubernetes
+$ ./hack/ginkgo-e2e.sh --ginkgo.focus=Ingress.* --delete-namespace-on-failure=false
+```
+
+See also [related FAQs](../faq#how-are-the-ingress-controllers-tested).
+
+[TODO](https://github.com/kubernetes/ingress/issues/5): add instructions on running integration tests, or e2e against
+local-up/minikube.
+
+## Releasing
+
+All Makefiles will produce a release binary, as shown above. To publish this
+to a wider Kubernetes user base, push the image to a container registry, like
+[gcr.io](https://cloud.google.com/container-registry/). All release images are hosted under `gcr.io/google_containers` and
+tagged according to a [semver](http://semver.org/) scheme.
+
+An example release might look like:
+```
+$ make push TAG=0.8.0 PREFIX=gcr.io/google_containers/glbc
+```
+
+Please follow these guidelines to cut a release:
+
+* Update the [release](https://help.github.com/articles/creating-releases/)
+page with a short description of the major changes that correspond to a given
+image tag.
+* Cut a release branch, if appropriate. Release branches follow the format of
+`controller-release-version`. Typically, pre-releases are cut from HEAD.
+All major feature work is done in HEAD. Specific bug fixes are
+cherry-picked into a release branch.
+* If you're not confident about the stability of the code,
+[tag](https://help.github.com/articles/working-with-tags/) it as alpha or beta.
+Typically, a release branch should have stable code.
+
+
diff --git a/docs/dev/releases.md b/docs/dev/releases.md deleted file mode 100644 index cd3aee7c8..000000000
--- a/docs/dev/releases.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# Releases
-
-This doc explains how to build, test and release ingress controllers.
-
-## Building
-
-All ingress controllers are built through a Makefile. Depending on your
-requirements you can build a raw server binary, a local container image,
-or push an image to a remote repository.
-
-Build a raw server binary
-```console
-$ make controllers
-```
-
-Build a local container image
-```console
-$ make docker-build TAG=0.0 PREFIX=$USER/ingress-controller
-```
-
-Push the container image to a remote repository
-```console
-$ make docker-push TAG=0.0 PREFIX=$USER/ingress-controller
-```
-
-## Dependencies
-
-The build should use dependencies in the `ingress/vendor` directory.
-Occasionally, you might need to update the dependencies.
-
-```console
-$ godep version
-godep v74 (linux/amd64/go1.6.1)
-$ go version
-go version go1.6.1 linux/amd64
-```
-
-This will automatically save godeps to `vendor/`
-```console
-$ godep save ./...
-```
-
-If you have an older version of `godep`
-```console
-$ go get github.com/tools/godep
-$ cd $GOPATH/src/github.com/tools/godep
-$ go build -o godep *.go
-```
-
-In general, you can follow [this guide](https://github.com/kubernetes/kubernetes/blob/release-1.5/docs/devel/godep.md#using-godep-to-manage-dependencies)
-to update godeps. To update a particular dependency, eg: Kubernetes:
-```console
-cd $GOPATH/src/k8s.io/ingress
-godep restore
-go get -u k8s.io/kubernetes
-cd $GOPATH/src/k8s.io/kubernetes
-godep restore
-cd $GOPATH/src/k8s.io/kubernetes/ingress
-rm -rf Godeps
-godep save ./...
-git [add/remove] as needed
-git commit
-```
-
-## Testing
-
-To run unittets, enter each directory in `controllers/`
-```console
-$ cd $GOPATH/src/k8s.io/ingress/controllers/gce
-$ go test ./...
-```
-
-If you have access to a Kubernetes cluster, you can also run e2e tests
-```console
-$ cd $GOPATH/src/k8s.io/kubernetes
-$ ./hack/ginkgo-e2e.sh --ginkgo.focus=Ingress.* --delete-namespace-on-failure=false
-```
-
-See also [related FAQs](../faq#how-are-the-ingress-controllers-tested).
-
-[TODO](https://github.com/kubernetes/ingress/issues/5): add instructions on running integration tests, or e2e against
-local-up/minikube.
-
-## Releasing
-
-All Makefiles will produce a release binary, as shown above. To publish this
-to a wider Kubernetes user base, push the image to a container registry, like
-[gcr.io](https://cloud.google.com/container-registry/). All release images are hosted under `gcr.io/google_containers` and
-tagged according to a [semver](http://semver.org/) scheme.
-
-An example release might look like:
-```
-$ make push TAG=0.8.0 PREFIX=gcr.io/google_containers/glbc
-```
-
-Please follow these guidelines to cut a release:
-
-* Update the [release](https://help.github.com/articles/creating-releases/)
-page with a short description of the major changes that correspond to a given
-image tag.
-* Cut a release branch, if appropriate. Release branches follow the format of
-`controller-release-version`. Typically, pre-releases are cut from HEAD.
-All major feature work is done in HEAD. Specific bug fixes are
-cherrypicked into a release branch.
-* If you're not confident about the stability of the code, tag it as
-alpha or beta. Typically, a release branch should have stable code.
-
-
diff --git a/docs/dev/setup.md b/docs/dev/setup-cluster.md similarity index 95% rename from docs/dev/setup.md rename to docs/dev/setup-cluster.md index 76e0397eb..06aa9a630
--- a/docs/dev/setup.md
+++ b/docs/dev/setup-cluster.md
@@ -1,9 +1,10 @@
-# Developer setup
+# Cluster Getting Started

This doc outlines the steps needed to setup a local dev cluster within which you
-can deploy/test an ingress controller.
+can deploy/test an ingress controller. Note that you can also set up the ingress controller
+locally.

-## Deploy a dev cluster
+## Deploy a Development Cluster

### Single node local cluster

diff --git a/docs/faq/README.md b/docs/faq/README.md index 60b665cdf..2ad4bfff3
--- a/docs/faq/README.md
+++ b/docs/faq/README.md
@@ -6,7 +6,7 @@ in this directory with site specific information.
Table of Contents ================= -* [How is Ingress different from Services?](#how-is-ingress-different-from-services) +* [How is Ingress different from a Service?](#how-is-ingress-different-from-a-service) * [I created an Ingress and nothing happens, what now?](#i-created-an-ingress-and-nothing-happens-what-now) * [How do I deploy an Ingress controller?](#how-do-i-deploy-an-ingress-controller) * [Are Ingress controllers namespaced?](#are-ingress-controllers-namespaced) @@ -32,7 +32,7 @@ or directly to the Service's endpoints. Run `describe` on the Ingress. If you see create/add events, you have an Ingress controller running in the cluster, otherwise, you either need to deploy or -restart your Ingress controller. If the events associaged with an Ingress are +restart your Ingress controller. If the events associated with an Ingress are insufficient to debug, consult the controller specific FAQ. ## How do I deploy an Ingress controller? @@ -46,7 +46,7 @@ controller by following [this](/examples/deployment) example. Ingress is namespaced, this means 2 Ingress objects can have the same name in 2 namespaces, and must only point to Services in its own namespace. An admin can deploy an Ingress controller such that it only satisfies Ingress from a given -namespace, but by default, controllers will watch the entire kubernetes cluster +namespace, but by default, controllers will watch the entire Kubernetes cluster for unsatisfied Ingress. ## How do I disable an Ingress controller? @@ -86,7 +86,7 @@ as well as in [this](/examples/daemonset/nginx) example. First check the [catalog](#is-there-a-catalog-of-existing-ingress-controllers), to make sure you really need to write one. 1. Write a [generic backend](/examples/custom-controller) -2. Keep it in your own repo, make sure it passes the [conformance suite](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/ingress_utils.go#L112) +2. Keep it in your own repo, make sure it passes the [conformance suite](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/framework/ingress_utils.go#L129) 3. Submit an example(s) in the appropriate subdirectories [here](/examples/README.md) 4. Add it to the catalog @@ -97,14 +97,14 @@ Yes, a non-comprehensive [catalog](/docs/catalog.md) exists. ## How are the Ingress controllers tested? Testing for the Ingress controllers is divided between: -* Ingress repo: unittests and pre-submit integration tests run via travis +* Ingress repo: unit tests and pre-submit integration tests run via travis * Kubernetes repo: [pre-submit e2e](https://k8s-testgrid.appspot.com/google-gce#gce&include-filter-by-regex=Loadbalancing), [post-merge e2e](https://k8s-testgrid.appspot.com/google-gce#gci-gce-ingress), [per release-branch e2e](https://k8s-testgrid.appspot.com/google-gce#gci-gce-ingress-1.5) The configuration for jenkins e2e tests are located [here](https://github.com/kubernetes/test-infra). -The Ingress E2Es are located [here](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/ingress.go), -each controller added to that suite must consistently pass the [conformance suite](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/ingress_utils.go#L112). +The Ingress E2Es are located [here](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/network/ingress.go), +each controller added to that suite must consistently pass the [conformance suite](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/framework/ingress_utils.go#L129). 
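Returning to the debugging question above: the quickest signal that some controller is watching an Ingress is its event stream. A hypothetical `describe` session (the Ingress name and the exact event text vary by controller):

```console
$ kubectl describe ing echomap
...
Events:
  FirstSeen  LastSeen  Count  From                      SubObjectPath  Type    Reason  Message
  ---------  --------  -----  ----                      -------------  ------  ------  -------
  2m         2m        1      nginx-ingress-controller                 Normal  CREATE  default/echomap
```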
## An Ingress controller E2E is failing, what should I do?
@@ -128,7 +128,7 @@ test project. If you think the failures are related to project quota, cleanup
leaked resources and bump up quota before debugging the leak.

If the preceding identification process fails, it's likely that the Ingress api
-is broked upstream. Try to setup a [dev environment](/docs/dev/setup.md) from
+is broken upstream. Try to set up a [dev environment](/docs/dev/setup-cluster.md) from
HEAD and create an Ingress. You should be deploying the [latest](https://github.com/kubernetes/ingress/releases)
release image to the local cluster.
@@ -147,4 +147,3 @@ to serialize into a flat roadmap. You might be interested in the following issue
* Ingress [classes](https://github.com/kubernetes/kubernetes/issues/30151)

As well as the issues in this repo.
-
diff --git a/docs/faq/gce.md b/docs/faq/gce.md index 122556f10..b9428608f
--- a/docs/faq/gce.md
+++ b/docs/faq/gce.md
@@ -273,7 +273,7 @@ If you find yourself in such a situation, you can delete the resources by hand:
1. Navigate to the [cloud console](https://console.cloud.google.com/) and click on the "Networking" tab, then choose "LoadBalancing"
2. Find the loadbalancer you'd like to delete, it should have a name formatted as: k8s-um-ns-name--UUID
-3. Delete it, check the boxes to also casade the deletion down to associated resources (eg: backend-services)
+3. Delete it, check the boxes to also cascade the deletion down to associated resources (eg: backend-services)
4. Switch to the "Compute Engine" tab, then choose "Instance Groups"
5. Delete the Instance Group allocated for the leaked Ingress, it should have a name formatted as: k8s-ig-UUID

diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index f10143790..16d7e3054
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -136,7 +136,7 @@ $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes

If it is not working, there are two possible reasons:

-1. The contents of the tokens is invalid. Find the secret name with `kubectl get secrets | grep service-account` and
+1. The contents of the tokens are invalid. Find the secret name with `kubectl get secrets | grep service-account` and
delete it with `kubectl delete secret <name>`. It will automatically be recreated.

2. You have a non-standard Kubernetes installation and the file containing the token
@@ -153,7 +153,7 @@ More information:
* [User Guide: Service Accounts](http://kubernetes.io/docs/user-guide/service-accounts/)
* [Cluster Administrator Guide: Managing Service Accounts](http://kubernetes.io/docs/admin/service-accounts-admin/)

-## kubeconfig
+## Kubeconfig
If you want to use a kubeconfig file for authentication, create a deployment file similar to the one below:

*Note:* the important part is the flag `--kubeconfig=/etc/kubernetes/kubeconfig.yaml`.
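The hunk below mounts the host's `/etc/kubernetes` into the pod so that this flag can resolve. For orientation only, a minimal kubeconfig of the kind typically placed there might look like this (the server address, file names, and cluster/user names are hypothetical):

```console
$ cat /etc/kubernetes/kubeconfig.yaml
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: https://10.0.0.1:443
    certificate-authority: /etc/kubernetes/ca.crt
contexts:
- name: local
  context:
    cluster: local
    user: admin
users:
- name: admin
  user:
    client-certificate: /etc/kubernetes/admin.crt
    client-key: /etc/kubernetes/admin.key
current-context: local
```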
@@ -193,6 +193,9 @@ spec: containers: - name: default-http-backend image: gcr.io/google_containers/defaultbackend:1.0 + volumeMounts: + - mountPath: /etc/kubernetes + name: kubeconfig livenessProbe: httpGet: path: /healthz @@ -259,7 +262,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: ingress-nginx imagePullPolicy: Always ports: @@ -294,4 +297,4 @@ spec: - name: "kubeconfig" hostPath: path: "/etc/kubernetes/" -``` \ No newline at end of file +``` diff --git a/examples/PREREQUISITES.md b/examples/PREREQUISITES.md index a2c12ad2a..03f9f74a4 100644 --- a/examples/PREREQUISITES.md +++ b/examples/PREREQUISITES.md @@ -40,7 +40,7 @@ secret "tls-secret" created You can act as your very own CA, or use an existing one. As an exercise / learning, we're going to generate our own CA, and also generate a client certificate. -These instructions are based in CoreOS OpenSSL [instructions](https://coreos.com/kubernetes/docs/latest/openssl.html) +These instructions are based on CoreOS OpenSSL [instructions](https://coreos.com/kubernetes/docs/latest/openssl.html) ### Generating a CA @@ -72,7 +72,7 @@ This will generate two files: A private key (ca.key) and a public key (ca.crt). The ca.crt can be used later in the step of creation of CA authentication secret. ### Generating the client certificate -The following steps generates a client certificate signed by the CA generated above. This client can be +The following steps generate a client certificate signed by the CA generated above. This client can be used to authenticate in a tls-auth configured ingress. First, we need to generate an 'openssl.cnf' file that will be used while signing the keys: diff --git a/examples/README.md b/examples/README.md index ea8d8c0ac..09a597d71 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,7 +1,7 @@ # Ingress examples This directory contains a catalog of examples on how to run, configure and -scale Ingress. Please review the [prerequisities](PREREQUISITES.md) before +scale Ingress. Please review the [prerequisites](PREREQUISITES.md) before trying them. ## Basic cross platform @@ -47,8 +47,8 @@ Multi-cluster | bridge Kubernetes clusters with Ingress | gce | Advanced Name | Description | Platform | Complexity Level -----| ----------- | ---------- | ---------------- Session stickyness | route requests consistently to the same endpoint | nginx | Advanced -Least connections | route requests based on least connections | on-perm | Advanced -Weights | route requrests to backends based on weights | nginx | Advanced +Least connections | route requests based on least connections | on-prem | Advanced +Weights | route requests to backends based on weights | nginx | Advanced ## Routing diff --git a/examples/auth/basic/haproxy/README.md b/examples/auth/basic/haproxy/README.md index f9c87b1d3..621129405 100644 --- a/examples/auth/basic/haproxy/README.md +++ b/examples/auth/basic/haproxy/README.md @@ -12,9 +12,6 @@ This document has the following prerequisites: end up with controller, a sample web app and an ingress resource to the `foo.bar` domain -As mentioned in the deployment instructions, you MUST turn down any existing -ingress controllers before running HAProxy Ingress. 
-
## Using Basic Authentication

HAProxy Ingress read user and password from `auth` file stored on secrets, one user
diff --git a/examples/auth/basic/nginx/README.md b/examples/auth/basic/nginx/README.md new file mode 100644 index 000000000..fc70bdc11
--- /dev/null
+++ b/examples/auth/basic/nginx/README.md
@@ -0,0 +1,125 @@
+
+This example shows how to add authentication in an Ingress rule using a secret that contains a file generated with `htpasswd`.
+
+```
+$ htpasswd -c auth foo
+New password: <bar>
+New password:
+Re-type new password:
+Adding password for user foo
+```
+
+```
+$ kubectl create secret generic basic-auth --from-file=auth
+secret "basic-auth" created
+```
+
+```
+$ kubectl get secret basic-auth -o yaml
+apiVersion: v1
+data:
+  auth: Zm9vOiRhcHIxJE9GRzNYeWJwJGNrTDBGSERBa29YWUlsSDkuY3lzVDAK
+kind: Secret
+metadata:
+  name: basic-auth
+  namespace: default
+type: Opaque
+```
+
+```
+echo "
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: ingress-with-auth
+  annotations:
+    # type of authentication
+    ingress.kubernetes.io/auth-type: basic
+    # name of the secret that contains the user/password definitions
+    ingress.kubernetes.io/auth-secret: basic-auth
+    # message to display with an appropriate context why the authentication is required
+    ingress.kubernetes.io/auth-realm: "Authentication Required - foo"
+spec:
+  rules:
+  - host: foo.bar.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: echoheaders
+          servicePort: 80
+" | kubectl create -f -
+```
+
+```
+$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com'
+*   Trying 10.2.29.4...
+* Connected to 10.2.29.4 (10.2.29.4) port 80 (#0)
+> GET / HTTP/1.1
+> Host: foo.bar.com
+> User-Agent: curl/7.43.0
+> Accept: */*
+>
+< HTTP/1.1 401 Unauthorized
+< Server: nginx/1.10.0
+< Date: Wed, 11 May 2016 05:27:23 GMT
+< Content-Type: text/html
+< Content-Length: 195
+< Connection: keep-alive
+< WWW-Authenticate: Basic realm="Authentication Required - foo"
+<
+<html>
+<head><title>401 Authorization Required</title></head>
+<body bgcolor="white">
+<center><h1>401 Authorization Required</h1></center>
+<hr><center>nginx/1.10.0</center>
+</body>
+</html>
+* Connection #0 to host 10.2.29.4 left intact
+```
+
+```
+$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com' -u 'foo:bar'
+*   Trying 10.2.29.4...
+* Connected to 10.2.29.4 (10.2.29.4) port 80 (#0)
+* Server auth using Basic with user 'foo'
+> GET / HTTP/1.1
+> Host: foo.bar.com
+> Authorization: Basic Zm9vOmJhcg==
+> User-Agent: curl/7.43.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Server: nginx/1.10.0
+< Date: Wed, 11 May 2016 06:05:26 GMT
+< Content-Type: text/plain
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Vary: Accept-Encoding
+<
+CLIENT VALUES:
+client_address=10.2.29.4
+command=GET
+real path=/
+query=nil
+request_version=1.1
+request_uri=http://foo.bar.com:8080/
+
+SERVER VALUES:
+server_version=nginx: 1.9.11 - lua: 10001
+
+HEADERS RECEIVED:
+accept=*/*
+authorization=Basic Zm9vOmJhcg==
+connection=close
+host=foo.bar.com
+user-agent=curl/7.43.0
+x-forwarded-for=10.2.29.1
+x-forwarded-host=foo.bar.com
+x-forwarded-port=80
+x-forwarded-proto=http
+x-real-ip=10.2.29.1
+BODY:
+* Connection #0 to host 10.2.29.4 left intact
+-no body in request-
+```
diff --git a/examples/auth/client-certs/haproxy/README.md b/examples/auth/client-certs/haproxy/README.md index 75813ff1b..b4078b4b5
--- a/examples/auth/client-certs/haproxy/README.md
+++ b/examples/auth/client-certs/haproxy/README.md
@@ -17,9 +17,6 @@ following these steps you should have a secret named `caingress`, a certificate
* Use these same steps and create another CA and generate another certificate and private key `fake.crt` and `fake.key` just for testing

-As mentioned in the deployment instructions, you MUST turn down any existing
-ingress controllers before running HAProxy Ingress.
-
Secret, certificates and keys can be created using these shortcuts:

CA and it's secret:
diff --git a/examples/auth/client-certs/nginx/README.md b/examples/auth/client-certs/nginx/README.md index 730a802fa..e8c9e83ac
--- a/examples/auth/client-certs/nginx/README.md
+++ b/examples/auth/client-certs/nginx/README.md
@@ -5,7 +5,7 @@ This example demonstrates how to enable the TLS Authentication through the nginx
## Terminology

* CA: Certificate authority signing the client cert, in this example we will play the role of a CA.
-You can generate a CA cert as show in this doc.
+You can generate a CA cert as shown in this doc.

* CA Certificate(s) - Certificate Authority public key. Client certs must chain back to this cert,
meaning the Issuer field of some certificate in the chain leading up to the client cert must contain
@@ -32,7 +32,7 @@ Certificate Authentication is achieved through 2 annotations on the Ingress, as
| --- | --- | --- |
|ingress.kubernetes.io/auth-tls-secret|Sets the secret that contains the authorized CA Chain|string|
|ingress.kubernetes.io/auth-tls-verify-depth|The verification depth Certificate Authentication will make|number (default to 1)|
-
+|ingress.kubernetes.io/auth-tls-error-page|The page the user should be redirected to in case of an auth error|string (defaults to empty)|
The following command instructs the controller to enable TLS authentication using the secret from the ``ingress.kubernetes.io/auth-tls-secret`` annotation on the Ingress.
Clients must present this cert to the loadbalancer, or they will receive a HTTP 400 response
@@ -61,6 +61,7 @@ Rules:
Annotations:
  auth-tls-secret: default/caingress
  auth-tls-verify-depth: 3
+  auth-tls-error-page: http://www.mysite.com/error-cert.html
Events:
  FirstSeen LastSeen Count From SubObjectPath Type Reason Message
diff --git a/examples/auth/client-certs/nginx/nginx-tls-auth.yaml b/examples/auth/client-certs/nginx/nginx-tls-auth.yaml index 23cac7b49..ac03d9d7c
--- a/examples/auth/client-certs/nginx/nginx-tls-auth.yaml
+++ b/examples/auth/client-certs/nginx/nginx-tls-auth.yaml
@@ -5,6 +5,7 @@ metadata:
    # Create this with kubectl create secret generic caingress --from-file=ca.crt --namespace=default
    ingress.kubernetes.io/auth-tls-secret: "default/caingress"
    ingress.kubernetes.io/auth-tls-verify-depth: "3"
+    ingress.kubernetes.io/auth-tls-error-page: "http://www.mysite.com/error-cert.html"
    kubernetes.io/ingress.class: "nginx"
  name: nginx-test
  namespace: default
diff --git a/examples/aws/nginx/README.md b/examples/aws/nginx/README.md index d9b1d59d5..3245093ea
--- a/examples/aws/nginx/README.md
+++ b/examples/aws/nginx/README.md
@@ -14,7 +14,7 @@ This command creates:

Is the proxy protocol necessary?

-No but only enabling the procotol is possible to keep the real source IP address requesting the connection.
+No, but enabling the protocol is the only way to keep the real source IP address of the client requesting the connection.

### References
diff --git a/examples/aws/nginx/nginx-ingress-controller.yaml b/examples/aws/nginx/nginx-ingress-controller.yaml index 77ef74359..b05eddd02
--- a/examples/aws/nginx/nginx-ingress-controller.yaml
+++ b/examples/aws/nginx/nginx-ingress-controller.yaml
@@ -101,7 +101,7 @@ spec:
    spec:
      terminationGracePeriodSeconds: 60
      containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13
        name: ingress-nginx
        imagePullPolicy: Always
        ports:
diff --git a/examples/custom-controller/README.md b/examples/custom-controller/README.md index f420ae780..64c439cea
--- a/examples/custom-controller/README.md
+++ b/examples/custom-controller/README.md
@@ -2,7 +2,7 @@
This example contains the source code of a simple dummy controller. If you want
more details on the interface, or what the generic controller is actually doing,
-please read [this doc](/docs/dev/devel.md). You can deploy the controller as
+please read [this doc](/docs/dev/custom-controller.md). You can deploy the controller as
follows:

```console
diff --git a/examples/custom-controller/server.go b/examples/custom-controller/server.go index f1b4d6b84..6d54e4b7a
--- a/examples/custom-controller/server.go
+++ b/examples/custom-controller/server.go
@@ -24,7 +24,8 @@ import (

	"github.com/spf13/pflag"

-	api "k8s.io/client-go/pkg/api/v1"
+	api "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"

	nginxconfig "k8s.io/ingress/controllers/nginx/pkg/config"
	"k8s.io/ingress/core/pkg/ingress"
@@ -105,3 +106,16 @@ func (n DummyController) SetListers(lister ingress.StoreLister) {
func (n DummyController) DefaultIngressClass() string {
	return "dummy"
}
+
+// UpdateIngressStatus is a no-op: the dummy controller does not publish load balancer status.
+func (n DummyController) UpdateIngressStatus(*extensions.Ingress) []api.LoadBalancerIngress {
+	return nil
+}
+
+// DefaultEndpoint returns the default endpoint to be used as the default server that returns 404.
+func (n DummyController) DefaultEndpoint() ingress.Endpoint {
+	return ingress.Endpoint{
+		Address: "127.0.0.1",
+		Port:    "8181",
+		Target:  &api.ObjectReference{},
+	}
+}
diff --git a/examples/customization/configuration-snippets/nginx/nginx-ingress-controller.yaml b/examples/customization/configuration-snippets/nginx/nginx-ingress-controller.yaml index 92cae7d84..a1f4a9eca
--- a/examples/customization/configuration-snippets/nginx/nginx-ingress-controller.yaml
+++ b/examples/customization/configuration-snippets/nginx/nginx-ingress-controller.yaml
@@ -19,7 +19,7 @@ spec:
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
diff --git a/examples/customization/custom-configuration/haproxy/README.md b/examples/customization/custom-configuration/haproxy/README.md index a0eafc7a3..77b13ba15
--- a/examples/customization/custom-configuration/haproxy/README.md
+++ b/examples/customization/custom-configuration/haproxy/README.md
@@ -1,15 +1,12 @@
# Customize the HAProxy configuration

-This example use a [ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) to customize the HAProxy configuration.
+This example uses a [ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) to customize the HAProxy configuration.

## Prerequisites

This document has the following prerequisites:

-Deploy only the tls-secret and the default backend from the [deployment instructions](../../../deployment/haproxy/)
-
-As mentioned in the deployment instructions, you MUST turn down any existing
-ingress controllers before running HAProxy Ingress.
+Deploy only the tls-secret and the default backend from the [deployment instructions](/examples/deployment/haproxy).

## Customize the HAProxy configuration

Using a [ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) is possible to customize the HAProxy configuration.

For example, if we want to change the syslog-endpoint we need to create a ConfigMap:
```
-$ kubectl create configmap haproxy-conf --from-literal=syslog-endpoint=172.17.8.101
-
+$ kubectl create configmap haproxy-ingress --from-literal=syslog-endpoint=172.17.8.101
```

Create the HAProxy Ingress deployment:
+
```
$ kubectl create -f haproxy-custom-configuration.yaml
```

The only difference from the deployment instructions is the --configmap parameter:
+
```
- --configmap=default/haproxy-conf
```

If the ConfigMap is updated, HAProxy will be reloaded with the new configuration.
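Because the controller watches that ConfigMap, changing a key is enough to trigger the reload just described; a hypothetical non-interactive update (the new endpoint value is a placeholder):

```console
$ kubectl patch configmap haproxy-ingress \
    -p '{"data":{"syslog-endpoint":"172.17.8.102"}}'
configmap "haproxy-ingress" patched
```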
-Check all the config options in the [HAProxy Ingress docs](https://github.com/jcmoraisjr/haproxy-ingress#configmap) \ No newline at end of file +Check all the config options in the [HAProxy Ingress docs](https://github.com/jcmoraisjr/haproxy-ingress#configmap) diff --git a/examples/customization/custom-configuration/haproxy/haproxy-custom-configuration.yaml b/examples/customization/custom-configuration/haproxy/haproxy-custom-configuration.yaml index 694179f73..796e4c3f8 100644 --- a/examples/customization/custom-configuration/haproxy/haproxy-custom-configuration.yaml +++ b/examples/customization/custom-configuration/haproxy/haproxy-custom-configuration.yaml @@ -5,7 +5,6 @@ metadata: run: haproxy-ingress name: haproxy-ingress spec: - replicas: 1 selector: matchLabels: run: haproxy-ingress @@ -18,9 +17,9 @@ spec: - name: haproxy-ingress image: quay.io/jcmoraisjr/haproxy-ingress args: - - --default-backend-service=default/ingress-default-backend - - --default-ssl-certificate=default/tls-secret - - --configmap=default/haproxy-conf + - --default-backend-service=$(POD_NAMESPACE)/ingress-default-backend + - --default-ssl-certificate=$(POD_NAMESPACE)/tls-secret + - --configmap=$(POD_NAMESPACE)/haproxy-ingress ports: - name: http containerPort: 80 diff --git a/examples/customization/custom-configuration/nginx/nginx-custom-configuration.yaml b/examples/customization/custom-configuration/nginx/nginx-custom-configuration.yaml index 3b29bd3ad..2a44b15e1 100644 --- a/examples/customization/custom-configuration/nginx/nginx-custom-configuration.yaml +++ b/examples/customization/custom-configuration/nginx/nginx-custom-configuration.yaml @@ -22,7 +22,7 @@ spec: # hostNetwork: true terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-controller readinessProbe: httpGet: diff --git a/examples/customization/custom-configuration/nginx/nginx-load-balancer-conf.yaml b/examples/customization/custom-configuration/nginx/nginx-load-balancer-conf.yaml index afd63d06d..aaeef5b0c 100644 --- a/examples/customization/custom-configuration/nginx/nginx-load-balancer-conf.yaml +++ b/examples/customization/custom-configuration/nginx/nginx-load-balancer-conf.yaml @@ -1,8 +1,11 @@ apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-custom-configuration + labels: + k8s-app: nginx-ingress-controller + namespace: kube-system data: proxy-connect-timeout: "10" proxy-read-timeout: "120" proxy-send-timeout: "120" -kind: ConfigMap -metadata: - name: nginx-custom-configuration diff --git a/examples/customization/custom-errors/nginx/rc-custom-errors.yaml b/examples/customization/custom-errors/nginx/rc-custom-errors.yaml index fbda21c15..122b539ef 100644 --- a/examples/customization/custom-errors/nginx/rc-custom-errors.yaml +++ b/examples/customization/custom-errors/nginx/rc-custom-errors.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-lb imagePullPolicy: Always readinessProbe: diff --git a/examples/customization/custom-headers/nginx/custom-headers.yaml b/examples/customization/custom-headers/nginx/custom-headers.yaml index beeefc8a4..031d8b2cb 100644 --- a/examples/customization/custom-headers/nginx/custom-headers.yaml +++ 
b/examples/customization/custom-headers/nginx/custom-headers.yaml
@@ -5,5 +5,5 @@ data:
  X-Using-Nginx-Controller: "true"
kind: ConfigMap
metadata:
-  name: proxy-headers
+  name: custom-headers
  namespace: kube-system
diff --git a/examples/customization/custom-headers/nginx/nginx-ingress-controller.yaml b/examples/customization/custom-headers/nginx/nginx-ingress-controller.yaml index 92cae7d84..a1f4a9eca
--- a/examples/customization/custom-headers/nginx/nginx-ingress-controller.yaml
+++ b/examples/customization/custom-headers/nginx/nginx-ingress-controller.yaml
@@ -19,7 +19,7 @@ spec:
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
diff --git a/examples/customization/custom-template/README.md b/examples/customization/custom-template/README.md index d2b223b51..259ca80a0
--- a/examples/customization/custom-template/README.md
+++ b/examples/customization/custom-template/README.md
@@ -1,4 +1,4 @@
-This example shows how is possible to use a custom template
+This example shows how it is possible to use a custom template

First create a configmap with a template inside running:
```
diff --git a/examples/customization/custom-template/custom-template.yaml b/examples/customization/custom-template/custom-template.yaml index 7e110f861..cc64c7930
--- a/examples/customization/custom-template/custom-template.yaml
+++ b/examples/customization/custom-template/custom-template.yaml
@@ -16,7 +16,7 @@ spec:
    spec:
      terminationGracePeriodSeconds: 60
      containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13
        name: nginx-ingress-lb
        imagePullPolicy: Always
        readinessProbe:
diff --git a/examples/customization/custom-vts-metrics/nginx/README.md b/examples/customization/custom-vts-metrics/nginx/README.md index e8e678ecf..607e1012d
--- a/examples/customization/custom-vts-metrics/nginx/README.md
+++ b/examples/customization/custom-vts-metrics/nginx/README.md
@@ -55,7 +55,7 @@ nginx-ingress-controller-873061567-4n3k2   1/1       Running   0          42s
```
## Result
-Check wether to open the vts status:
+Check whether the vts status module is enabled:
```console
$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf|grep vhost_traffic_status_display
vhost_traffic_status_display;
diff --git a/examples/customization/custom-vts-metrics/nginx/nginx-ingress-controller.yaml b/examples/customization/custom-vts-metrics/nginx/nginx-ingress-controller.yaml index 896c3ee9a..70cffd6c6
--- a/examples/customization/custom-vts-metrics/nginx/nginx-ingress-controller.yaml
+++ b/examples/customization/custom-vts-metrics/nginx/nginx-ingress-controller.yaml
@@ -22,7 +22,7 @@ spec:
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
diff --git a/examples/customization/ssl-dh-param/nginx/nginx-ingress-controller.yaml b/examples/customization/ssl-dh-param/nginx/nginx-ingress-controller.yaml index 92cae7d84..a1f4a9eca
--- a/examples/customization/ssl-dh-param/nginx/nginx-ingress-controller.yaml
+++
b/examples/customization/ssl-dh-param/nginx/nginx-ingress-controller.yaml @@ -19,7 +19,7 @@ spec: # hostNetwork: true terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-controller readinessProbe: httpGet: diff --git a/examples/daemonset/haproxy/README.md b/examples/daemonset/haproxy/README.md index 75fe32eb5..31390913e 100644 --- a/examples/daemonset/haproxy/README.md +++ b/examples/daemonset/haproxy/README.md @@ -4,10 +4,10 @@ In some cases, the Ingress controller will be required to be run at all the node ## Prerequisites -This ingress controller doesn't yet have support for -[ingress classes](/examples/PREREQUISITES.md#ingress-class). You MUST turn -down any existing ingress controllers before running HAProxy Ingress controller or -they will fight for Ingresses. This includes any cloudprovider controller. +If you have another Ingress controller deployed, you will need to make sure your +Ingress resources target exactly one Ingress controller by specifying the +[ingress.class](/examples/PREREQUISITES.md#ingress-class) annotation as +`haproxy`. This document has also the following prerequisites: @@ -41,6 +41,11 @@ NAME READY STATUS RESTARTS AGE default-http-backend-q5sb6 1/1 Running 0 30m ``` +## RBAC Authorization + +Check the [RBAC sample](/examples/rbac/haproxy) if deploying on a cluster with +[RBAC authorization](https://kubernetes.io/docs/admin/authorization/rbac/). + ## Ingress DaemonSet Deploy the daemonset as follows: diff --git a/examples/daemonset/haproxy/haproxy-ingress-daemonset.yaml b/examples/daemonset/haproxy/haproxy-ingress-daemonset.yaml index 6d6d689ad..713c92612 100644 --- a/examples/daemonset/haproxy/haproxy-ingress-daemonset.yaml +++ b/examples/daemonset/haproxy/haproxy-ingress-daemonset.yaml @@ -13,10 +13,9 @@ spec: containers: - name: haproxy-ingress image: quay.io/jcmoraisjr/haproxy-ingress - imagePullPolicy: IfNotPresent args: - - --default-backend-service=default/default-http-backend - - --default-ssl-certificate=default/tls-secret + - --default-backend-service=$(POD_NAMESPACE)/default-http-backend + - --default-ssl-certificate=$(POD_NAMESPACE)/tls-secret ports: - name: http containerPort: 80 diff --git a/examples/daemonset/nginx/nginx-ingress-daemonset.yaml b/examples/daemonset/nginx/nginx-ingress-daemonset.yaml index 6d2f42b4e..69b57f9ec 100644 --- a/examples/daemonset/nginx/nginx-ingress-daemonset.yaml +++ b/examples/daemonset/nginx/nginx-ingress-daemonset.yaml @@ -16,7 +16,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-lb readinessProbe: httpGet: diff --git a/examples/deployment/haproxy/README.md b/examples/deployment/haproxy/README.md index 47e320f46..de54eae7c 100644 --- a/examples/deployment/haproxy/README.md +++ b/examples/deployment/haproxy/README.md @@ -5,10 +5,10 @@ for instructions on how to create a new one. ## Prerequisites -This ingress controller doesn't yet have support for -[ingress classes](/examples/PREREQUISITES.md#ingress-class). You MUST turn -down any existing ingress controllers before running HAProxy Ingress controller or -they will fight for Ingresses. This includes any cloudprovider controller. 
+If you have another Ingress controller deployed, you will need to make sure your
+Ingress resources target exactly one Ingress controller by specifying the
+[ingress.class](/examples/PREREQUISITES.md#ingress-class) annotation as
+`haproxy`.

This document has also the following prerequisites:
@@ -70,6 +70,11 @@ configmap can be edited or replaced later in order to apply new
configuration on a running ingress controller. All supported options are
[here](https://github.com/jcmoraisjr/haproxy-ingress#configmap).

+## RBAC Authorization
+
+Check the [RBAC sample](/examples/rbac/haproxy) if deploying on a cluster with
+[RBAC authorization](https://kubernetes.io/docs/admin/authorization/rbac/).
+
## Controller

Deploy HAProxy Ingress:
diff --git a/examples/deployment/haproxy/haproxy-ingress.yaml b/examples/deployment/haproxy/haproxy-ingress.yaml index 84c661b90..796e4c3f8
--- a/examples/deployment/haproxy/haproxy-ingress.yaml
+++ b/examples/deployment/haproxy/haproxy-ingress.yaml
@@ -5,7 +5,6 @@ metadata:
    run: haproxy-ingress
  name: haproxy-ingress
spec:
-  replicas: 1
  selector:
    matchLabels:
      run: haproxy-ingress
@@ -18,9 +17,9 @@ spec:
      - name: haproxy-ingress
        image: quay.io/jcmoraisjr/haproxy-ingress
        args:
-        - --default-backend-service=default/ingress-default-backend
-        - --default-ssl-certificate=default/tls-secret
-        - --configmap=default/haproxy-ingress
+        - --default-backend-service=$(POD_NAMESPACE)/ingress-default-backend
+        - --default-ssl-certificate=$(POD_NAMESPACE)/tls-secret
+        - --configmap=$(POD_NAMESPACE)/haproxy-ingress
        ports:
        - name: http
          containerPort: 80
diff --git a/examples/deployment/nginx/README.md b/examples/deployment/nginx/README.md index 7adf49811..2eff41f2b
--- a/examples/deployment/nginx/README.md
+++ b/examples/deployment/nginx/README.md
@@ -13,7 +13,7 @@ $ kubectl apply -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created

-$ kubectl -n kube-system get po
+$ kubectl -n kube-system get pods
NAME                                    READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-qgwdd   1/1       Running   0          28s
```
@@ -22,23 +22,35 @@ default-http-backend-2657704409-qgwdd   1/1       Running   0          28s

You can deploy the controller as follows:

+1. Disable the ingress addon:
+```console
+$ minikube addons disable ingress
+```
+2. Use the [docker daemon](https://github.com/kubernetes/minikube/blob/master/docs/reusing_the_docker_daemon.md)
+3. [Build the image](../../../docs/dev/getting-started.md)
+4. Change [nginx-ingress-controller.yaml](nginx-ingress-controller.yaml) to use the appropriate image. Local images can be
+seen by performing `docker images`.
+```yaml
+image: <image>:<tag>
+```
+5. 
Create the nginx-ingress-controller deployment: ```console $ kubectl apply -f nginx-ingress-controller.yaml deployment "nginx-ingress-controller" created -$ kubectl -n kube-system get po +$ kubectl -n kube-system get pods NAME READY STATUS RESTARTS AGE default-http-backend-2657704409-qgwdd 1/1 Running 0 2m nginx-ingress-controller-873061567-4n3k2 1/1 Running 0 42s ``` Note the default settings of this controller: -* serves a `/healthz` url on port 10254, as both a liveness and readiness probe +* serves a `/healthz` url on port 10254, as a status probe * takes a `--default-backend-service` argument pointing to the Service created above ## Running on a cloud provider -If you're running this ingress controller on a cloudprovider, you should assume +If you're running this ingress controller on a cloud-provider, you should assume the provider also has a native Ingress controller and set the annotation `kubernetes.io/ingress.class: nginx` in all Ingresses meant for this controller. You might also need to open a firewall-rule for ports 80/443 of the nodes the diff --git a/examples/deployment/nginx/kubeadm/nginx-ingress-controller.yaml b/examples/deployment/nginx/kubeadm/nginx-ingress-controller.yaml index 7614c4f75..aa1d5949f 100644 --- a/examples/deployment/nginx/kubeadm/nginx-ingress-controller.yaml +++ b/examples/deployment/nginx/kubeadm/nginx-ingress-controller.yaml @@ -71,7 +71,7 @@ spec: hostNetwork: true terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-controller readinessProbe: httpGet: diff --git a/examples/deployment/nginx/nginx-ingress-controller.yaml b/examples/deployment/nginx/nginx-ingress-controller.yaml index c11c0f8a4..8aecb6fbd 100644 --- a/examples/deployment/nginx/nginx-ingress-controller.yaml +++ b/examples/deployment/nginx/nginx-ingress-controller.yaml @@ -22,7 +22,7 @@ spec: # hostNetwork: true terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-controller readinessProbe: httpGet: diff --git a/examples/health-checks/gce/health_check_app.yaml b/examples/health-checks/gce/health_check_app.yaml index ee8ade185..b8d36bf38 100644 --- a/examples/health-checks/gce/health_check_app.yaml +++ b/examples/health-checks/gce/health_check_app.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: echoheaders - image: gcr.io/google_containers/echoserver:1.5 + image: gcr.io/google_containers/echoserver:1.8 ports: - containerPort: 8080 readinessProbe: @@ -23,22 +23,22 @@ spec: successThreshold: 1 failureThreshold: 10 env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP --- apiVersion: v1 diff --git a/examples/http-svc.yaml b/examples/http-svc.yaml index 372445064..ff25ab004 100644 --- a/examples/http-svc.yaml +++ b/examples/http-svc.yaml @@ 
-21,7 +21,7 @@ kind: ReplicationController metadata: name: http-svc spec: - replicas: 2 + replicas: 1 template: metadata: labels: @@ -29,23 +29,23 @@ spec: spec: containers: - name: http-svc - image: gcr.io/google_containers/echoserver:1.5 + image: gcr.io/google_containers/echoserver:1.8 ports: - containerPort: 8080 env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/examples/multi-tls/haproxy/README.md b/examples/multi-tls/haproxy/README.md index 402585f8a..bb7358c78 100644 --- a/examples/multi-tls/haproxy/README.md +++ b/examples/multi-tls/haproxy/README.md @@ -1,6 +1,6 @@ # HAProxy Multi TLS certificate termination -This examples uses 2 different certificates to terminate SSL for 2 hostnames. +This example uses 2 different certificates to terminate SSL for 2 hostnames. ## Prerequisites @@ -9,9 +9,6 @@ This document has the following prerequisites: * Deploy [HAProxy Ingress controller](/examples/deployment/haproxy), you should end up with controller, a sample web app and default TLS secret * Create [*two* secrets](/examples/PREREQUISITES.md#tls-certificates) named `foobar-ssl` with subject `'/CN=foo.bar'` and `barfoo-ssl` with subject `'/CN=bar.foo'` -As mentioned in the deployment instructions, you MUST turn down any existing -ingress controllers before running HAProxy Ingress. - ## Using a new TLS certificate Update ingress resource in order to add TLS termination to two hosts: @@ -70,4 +67,4 @@ $ openssl s_client -connect 10.129.51.55:31578 -servername bar.foo subject=/CN=bar.foo issuer=/CN=bar.foo --- -``` \ No newline at end of file +``` diff --git a/examples/multi-tls/nginx/README.md b/examples/multi-tls/nginx/README.md index ef4cd65bb..7eddc42d2 100644 --- a/examples/multi-tls/nginx/README.md +++ b/examples/multi-tls/nginx/README.md @@ -1,6 +1,6 @@ # Multi TLS certificate termination -This examples uses 2 different certificates to terminate SSL for 2 hostnames. +This example uses 2 different certificates to terminate SSL for 2 hostnames. 1. Deploy the controller by creating the rc in the parent dir 2. 
Create tls secrets for foo.bar.com and bar.baz.com as indicated in the yaml @@ -91,4 +91,4 @@ $ curl https://104.154.30.67 -H 'Host:bar.baz.com' -k $ curl 104.154.30.67 default backend - 404 -``` \ No newline at end of file +``` diff --git a/examples/multi-tls/nginx/multi-tls.yaml b/examples/multi-tls/nginx/multi-tls.yaml index a2f2048b4..a8446aa62 100644 --- a/examples/multi-tls/nginx/multi-tls.yaml +++ b/examples/multi-tls/nginx/multi-tls.yaml @@ -58,26 +58,26 @@ spec: spec: containers: - name: echoheaders - image: gcr.io/google_containers/echoserver:1.5 + image: gcr.io/google_containers/echoserver:1.8 ports: - containerPort: 8080 env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP --- apiVersion: extensions/v1beta1 @@ -93,7 +93,7 @@ spec: # The cert must also contain the subj-name foo.bar.com # You can create it via: # make keys secret SECRET=/tmp/foobar.json HOST=foo.bar.com NAME=foobar - # https://github.com/kubernetes/contrib/tree/master/ingress/controllers/gce/https_example + # https://github.com/kubernetes/ingress/tree/master/controllers/gce/examples/https secretName: foobar - hosts: - bar.baz.com @@ -101,7 +101,7 @@ spec: # The cert must also contain the subj-name bar.baz.com # You can create it via: # make keys secret SECRET=/tmp/barbaz.json HOST=bar.baz.com NAME=barbaz - # https://github.com/kubernetes/contrib/tree/master/ingress/controllers/gce/https_example + # https://github.com/kubernetes/ingress/tree/master/controllers/gce/examples/https secretName: barbaz rules: - host: foo.bar.com diff --git a/examples/rbac/haproxy/README.md b/examples/rbac/haproxy/README.md new file mode 100644 index 000000000..d21899a37 --- /dev/null +++ b/examples/rbac/haproxy/README.md @@ -0,0 +1,80 @@ +# Role Based Access Control + +This example demonstrates how to authorize an ingress controller on a cluster +with role based access control. + +## Overview + +This example applies to ingress controllers being deployed in an environment with +[RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) enabled. + +## Service Account created in this example + +One ServiceAccount is created in this example, `ingress-controller`. See +[Using cert based authentication](#using-cert-based-authentication) +below if using client cert authentication. + +## Permissions Granted in this example + +There are two sets of permissions defined in this example. Cluster-wide +permissions defined by a `ClusterRole` and namespace specific permissions +defined by a `Role`, both named `ingress-controller`. + +### Cluster Permissions + +These permissions are granted in order for the ingress-controller to be +able to function as an ingress across the cluster. 
These permissions are
+granted to the ClusterRole:
+
+* `configmaps`, `endpoints`, `nodes`, `pods`, `secrets`: list, watch
+* `nodes`: get
+* `services`, `ingresses`: get, list, watch
+* `events`: create, patch
+* `ingresses/status`: update
+
+### Namespace Permissions
+
+These permissions are granted specific to the `ingress-controller` namespace.
+The Role permissions are:
+
+* `configmaps`, `pods`, `secrets`: get
+* `endpoints`: create, get, update
+
+Furthermore, to support leader election, the ingress controller needs to
+have access to a `configmap` in the `ingress-controller` namespace:
+
+* `configmaps`: get, update, create
+
+## Namespace created in this example
+
+The `Namespace` named `ingress-controller` is defined in this example. The
+namespace name can be changed arbitrarily as long as all of the references
+change as well.
+
+## Usage
+
+1. Create the `Namespace`, `Service Account`, `ClusterRole`, `Role`,
+`ClusterRoleBinding`, and `RoleBinding`:
+
+```console
+$ kubectl create -f ingress-controller-rbac.yml
+```
+
+2. Deploy the ingress controller. The deployment should be configured to use
+the `ingress-controller` service account name if not using kubeconfig and
+client cert based authentication. Add the `serviceAccountName` to the pod
+template spec:
+
+```yaml
+spec:
+  template:
+    spec:
+      serviceAccountName: ingress-controller
+```
+
+## Using cert based authentication
+
+Client certificate based authentication can also be used with the following changes:
+
+1. No need to add the `serviceAccountName` to the pod template spec.
+2. Sign a client certificate using `ingress-controller` as its common name.
diff --git a/examples/rbac/haproxy/ingress-controller-rbac.yml b/examples/rbac/haproxy/ingress-controller-rbac.yml new file mode 100644 index 000000000..57d0de7e0
--- /dev/null
+++ b/examples/rbac/haproxy/ingress-controller-rbac.yml
@@ -0,0 +1,133 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ingress-controller
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ingress-controller
+  namespace: ingress-controller
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: ingress-controller
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - secrets
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: ingress-controller
+  namespace: ingress-controller
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - pods
+      - secrets
+      - namespaces
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+    verbs:
+      - get
+      - create
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: ingress-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: ingress-controller
+subjects:
+  - kind: ServiceAccount
+    name: ingress-controller
+    namespace: ingress-controller
+  - apiGroup: 
rbac.authorization.k8s.io + kind: User + name: ingress-controller +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: ingress-controller + namespace: ingress-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-controller +subjects: + - kind: ServiceAccount + name: ingress-controller + namespace: ingress-controller + - apiGroup: rbac.authorization.k8s.io + kind: User + name: ingress-controller diff --git a/examples/rbac/nginx/README.md b/examples/rbac/nginx/README.md index ef910e07c..192fa6dd5 100644 --- a/examples/rbac/nginx/README.md +++ b/examples/rbac/nginx/README.md @@ -1,6 +1,6 @@ # Role Based Access Control -This example demontrates how to apply an nginx ingress controller with role based access control +This example demonstrates how to apply an nginx ingress controller with role based access control ## Overview @@ -62,7 +62,7 @@ have access to a `configmap` using the resourceName `ingress-controller-leader-n * `configmaps`: create This resourceName is the concatenation of the `election-id` and the -`ingress-class` as defined by the ingress-controller, which default to: +`ingress-class` as defined by the ingress-controller, which defaults to: * `election-id`: `ingress-controller-leader` * `ingress-class`: `nginx` diff --git a/examples/rbac/nginx/nginx-ingress-controller-rbac.yml b/examples/rbac/nginx/nginx-ingress-controller-rbac.yml index 64561f375..8bd611bb6 100644 --- a/examples/rbac/nginx/nginx-ingress-controller-rbac.yml +++ b/examples/rbac/nginx/nginx-ingress-controller-rbac.yml @@ -73,6 +73,7 @@ rules: - configmaps - pods - secrets + - namespaces verbs: - get - apiGroups: diff --git a/examples/rbac/nginx/nginx-ingress-controller.yml b/examples/rbac/nginx/nginx-ingress-controller.yml index 201bb8042..7c0fb7ce3 100644 --- a/examples/rbac/nginx/nginx-ingress-controller.yml +++ b/examples/rbac/nginx/nginx-ingress-controller.yml @@ -16,7 +16,7 @@ spec: serviceAccountName: nginx-ingress-serviceaccount containers: - name: nginx-ingress-controller - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend @@ -26,7 +26,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE + - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace diff --git a/examples/rewrite/haproxy/README.md b/examples/rewrite/haproxy/README.md index 07c8fc4b3..e013bb92f 100644 --- a/examples/rewrite/haproxy/README.md +++ b/examples/rewrite/haproxy/README.md @@ -12,9 +12,6 @@ the `foo.bar` domain * Configure only the default [TLS termination](/examples/tls-termination/haproxy) - there is no need to create another secret -As mentioned in the deployment instructions, you MUST turn down any existing -ingress controllers before running HAProxy Ingress. 
- ## Annotations The following annotations are implemented: diff --git a/examples/rewrite/nginx/README.md b/examples/rewrite/nginx/README.md index f934eb616..b3e50a88d 100644 --- a/examples/rewrite/nginx/README.md +++ b/examples/rewrite/nginx/README.md @@ -16,6 +16,7 @@ Rewriting can be controlled using the following annotations: | --- | --- | --- | |ingress.kubernetes.io/rewrite-target|Target URI where the traffic must be redirected|string| |ingress.kubernetes.io/add-base-url|indicates if is required to add a base tag in the head of the responses from the upstream servers|bool| +|ingress.kubernetes.io/base-url-scheme|Override for the scheme passed to the base tag|string| |ingress.kubernetes.io/ssl-redirect|Indicates if the location section is accessible SSL only (defaults to True when Ingress contains a Certificate)|bool| |ingress.kubernetes.io/force-ssl-redirect|Forces the redirection to HTTPS even if the Ingress is not TLS Enabled|bool| |ingress.kubernetes.io/app-root|Defines the Application Root that the Controller must redirect if it's not in '/' context|string| diff --git a/examples/scaling-deployment/haproxy/README.md b/examples/scaling-deployment/haproxy/README.md index 463550d72..31daaea39 100644 --- a/examples/scaling-deployment/haproxy/README.md +++ b/examples/scaling-deployment/haproxy/README.md @@ -4,10 +4,10 @@ This example aims to demonstrate the Deployment of multi haproxy ingress control ## Prerequisites -This ingress controller doesn't yet have support for -[ingress classes](/examples/PREREQUISITES.md#ingress-class). You MUST turn -down any existing ingress controllers before running HAProxy Ingress controller or -they will fight for Ingresses. This includes any cloudprovider controller. +If you have another Ingress controller deployed, you will need to make sure your +Ingress resources target exactly one Ingress controller by specifying the +[ingress.class](/examples/PREREQUISITES.md#ingress-class) annotation as +`haproxy`. This document has also the following prerequisites: @@ -41,6 +41,11 @@ NAME READY STATUS RESTARTS AGE default-http-backend-q5sb6 1/1 Running 0 30m ``` +## RBAC Authorization + +Check the [RBAC sample](/examples/rbac/haproxy) if deploying on a cluster with +[RBAC authorization](https://kubernetes.io/docs/admin/authorization/rbac/). 
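+ +The pod template of the haproxy-ingress Deployment then needs to reference +that service account. A minimal sketch, assuming the `ingress-controller` +Namespace and ServiceAccount from the RBAC sample have been created: + +```yaml +spec: + template: + spec: + serviceAccountName: ingress-controller +```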
+ ## Ingress Deployment Deploy the Deployment of multi controllers as follows: diff --git a/examples/scaling-deployment/haproxy/haproxy-ingress-deployment.yaml b/examples/scaling-deployment/haproxy/haproxy-ingress-deployment.yaml index 357ed6387..853af0ab5 100644 --- a/examples/scaling-deployment/haproxy/haproxy-ingress-deployment.yaml +++ b/examples/scaling-deployment/haproxy/haproxy-ingress-deployment.yaml @@ -17,10 +17,9 @@ spec: containers: - name: haproxy-ingress image: quay.io/jcmoraisjr/haproxy-ingress - imagePullPolicy: IfNotPresent args: - - --default-backend-service=default/default-http-backend - - --default-ssl-certificate=default/tls-secret + - --default-backend-service=$(POD_NAMESPACE)/default-http-backend + - --default-ssl-certificate=$(POD_NAMESPACE)/tls-secret ports: - name: http containerPort: 80 diff --git a/examples/scaling-deployment/nginx/nginx-ingress-deployment.yaml b/examples/scaling-deployment/nginx/nginx-ingress-deployment.yaml index cdd6d2e33..78fac2259 100644 --- a/examples/scaling-deployment/nginx/nginx-ingress-deployment.yaml +++ b/examples/scaling-deployment/nginx/nginx-ingress-deployment.yaml @@ -14,7 +14,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-controller readinessProbe: httpGet: diff --git a/examples/static-ip/nginx/nginx-ingress-controller.yaml b/examples/static-ip/nginx/nginx-ingress-controller.yaml index 86427e679..d1e83c8cd 100644 --- a/examples/static-ip/nginx/nginx-ingress-controller.yaml +++ b/examples/static-ip/nginx/nginx-ingress-controller.yaml @@ -18,7 +18,7 @@ spec: # hostNetwork: true terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-ingress-controller readinessProbe: httpGet: diff --git a/examples/tcp/nginx/README.md b/examples/tcp/nginx/README.md index 8a0981499..010403f1b 100644 --- a/examples/tcp/nginx/README.md +++ b/examples/tcp/nginx/README.md @@ -1,6 +1,6 @@ # TCP loadbalancing -This example show how to implement TCP loadbalancing throught the Nginx Controller +This example shows how to implement TCP loadbalancing through the Nginx Controller ## Prerequisites @@ -47,7 +47,7 @@ nginx-ingress-controller 1 1 1 3m $ kubectl -n kube-system describe rc nginx-ingress-controller Name: nginx-ingress-controller Namespace: kube-system -Image(s): gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 +Image(s): gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 Selector: k8s-app=nginx-tcp-ingress-lb Labels: k8s-app=nginx-ingress-lb Annotations: diff --git a/examples/tcp/nginx/nginx-tcp-ingress-controller.yaml b/examples/tcp/nginx/nginx-tcp-ingress-controller.yaml index 0a30a13ba..e38843630 100644 --- a/examples/tcp/nginx/nginx-tcp-ingress-controller.yaml +++ b/examples/tcp/nginx/nginx-tcp-ingress-controller.yaml @@ -17,7 +17,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-tcp-ingress-lb readinessProbe: httpGet: diff --git a/examples/tls-termination/elb-nginx/README.md b/examples/tls-termination/elb-nginx/README.md new file mode 100644 index 000000000..9fc110b19 --- /dev/null +++ 
b/examples/tls-termination/elb-nginx/README.md @@ -0,0 +1,15 @@ + +### Elastic Load Balancer for TLS termination + +This example shows the required steps to use a classic Elastic Load Balancer for TLS termination. + +Edit the file `elb-tls-nginx-ingress-controller.yaml`, replacing the dummy certificate id `"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"` with a valid one. + +Then execute: +``` +$ kubectl create -f elb-tls-nginx-ingress-controller.yaml +``` + +This example creates an ELB with just two listeners, one on port 80 and another on port 443. + +![Listeners](images/listener.png) diff --git a/examples/tls-termination/elb-nginx/images/listener.png b/examples/tls-termination/elb-nginx/images/listener.png new file mode 100644 index 000000000..006c69871 Binary files /dev/null and b/examples/tls-termination/elb-nginx/images/listener.png differ diff --git a/examples/tls-termination/elb-nginx/nginx-ingress-controller.yaml b/examples/tls-termination/elb-nginx/nginx-ingress-controller.yaml new file mode 100644 index 000000000..dc5be226c --- /dev/null +++ b/examples/tls-termination/elb-nginx/nginx-ingress-controller.yaml @@ -0,0 +1,135 @@ +kind: Service +apiVersion: v1 +metadata: + name: nginx-default-backend + labels: + k8s-addon: ingress-nginx.addons.k8s.io +spec: + ports: + - port: 80 + targetPort: http + selector: + app: nginx-default-backend + +--- + +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + name: nginx-default-backend + labels: + k8s-addon: ingress-nginx.addons.k8s.io +spec: + replicas: 1 + template: + metadata: + labels: + k8s-addon: ingress-nginx.addons.k8s.io + app: nginx-default-backend + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: default-http-backend + image: gcr.io/google_containers/defaultbackend:1.0 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + ports: + - name: http + containerPort: 8080 + protocol: TCP + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + name: ingress-nginx + labels: + k8s-addon: ingress-nginx.addons.k8s.io + +--- + +kind: Service +apiVersion: v1 +metadata: + name: ingress-nginx + labels: + k8s-addon: ingress-nginx.addons.k8s.io + annotations: + # replace with the correct value of the generated certificate in the AWS console + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX" + # the backend instances are HTTP + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + # Map port 443 + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + +spec: + type: LoadBalancer + selector: + app: ingress-nginx + ports: + - name: http + port: 80 + targetPort: http + - name: https + port: 443 + targetPort: http + +--- + +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + name: ingress-nginx + labels: + k8s-addon: ingress-nginx.addons.k8s.io +spec: + replicas: 1 + template: + metadata: + labels: + app: ingress-nginx + k8s-addon: ingress-nginx.addons.k8s.io + spec: + terminationGracePeriodSeconds: 60 + containers: + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 + name: ingress-nginx + imagePullPolicy: Always + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + env: + - name:
POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - /nginx-ingress-controller + - --default-backend-service=$(POD_NAMESPACE)/nginx-default-backend + - --configmap=$(POD_NAMESPACE)/ingress-nginx + - --publish-service=$(POD_NAMESPACE)/ingress-nginx diff --git a/examples/tls-termination/haproxy/README.md b/examples/tls-termination/haproxy/README.md index e019a00ee..8f1fb8515 100644 --- a/examples/tls-termination/haproxy/README.md +++ b/examples/tls-termination/haproxy/README.md @@ -7,9 +7,6 @@ This document has the following prerequisites: * Deploy [HAProxy Ingress controller](/examples/deployment/haproxy), you should end up with controller, a sample web app and default TLS secret * Create [*another* secret](/examples/PREREQUISITES.md#tls-certificates) named `foobar-ssl` and subject `'/CN=foo.bar'` -As mentioned in the deployment instructions, you MUST turn down any existing -ingress controllers before running HAProxy Ingress. - ## Using default TLS certificate Update ingress resource in order to add TLS termination to host `foo.bar`: diff --git a/examples/udp/nginx/README.md b/examples/udp/nginx/README.md index 97fce00f9..ff70760b3 100644 --- a/examples/udp/nginx/README.md +++ b/examples/udp/nginx/README.md @@ -1,6 +1,6 @@ # UDP loadbalancing -This example show how to implement UDP loadbalancing throught the Nginx Controller +This example shows how to implement UDP loadbalancing through the Nginx Controller ## Prerequisites @@ -53,7 +53,7 @@ nginx-udp-ingress-controller 1 1 1 13m $ kubectl -n kube-system describe rc nginx-udp-ingress-controller Name: nginx-udp-ingress-controller Namespace: kube-system -Image(s): gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 +Image(s): gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 Selector: k8s-app=nginx-udp-ingress-lb Labels: k8s-app=nginx-udp-ingress-lb Annotations: diff --git a/examples/udp/nginx/nginx-udp-ingress-controller.yaml b/examples/udp/nginx/nginx-udp-ingress-controller.yaml index cad2e6f8c..f777ba277 100644 --- a/examples/udp/nginx/nginx-udp-ingress-controller.yaml +++ b/examples/udp/nginx/nginx-udp-ingress-controller.yaml @@ -17,7 +17,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.8 + - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13 name: nginx-udp-ingress-lb readinessProbe: httpGet: diff --git a/images/404-server/.gitignore b/images/404-server/.gitignore new file mode 100644 index 000000000..254defddb --- /dev/null +++ b/images/404-server/.gitignore @@ -0,0 +1 @@ +server diff --git a/images/404-server/Dockerfile b/images/404-server/Dockerfile new file mode 100644 index 000000000..1875f0a2e --- /dev/null +++ b/images/404-server/Dockerfile @@ -0,0 +1,20 @@ +# Copyright 2015 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
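+ +# Built FROM scratch, so the server binary must be statically linked +# (the Makefile builds it with CGO_ENABLED=0); the container also runs +# as the unprivileged "nobody" user rather than root.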
+ +FROM scratch + +# nobody:nobody +USER 65534:65534 +COPY server / +ENTRYPOINT ["/server"] diff --git a/images/404-server/Godeps/Godeps.json b/images/404-server/Godeps/Godeps.json new file mode 100644 index 000000000..b985aed2b --- /dev/null +++ b/images/404-server/Godeps/Godeps.json @@ -0,0 +1,57 @@ +{ + "ImportPath": "k8s.io/ingress/images/404-server", + "GoVersion": "go1.8", + "GodepVersion": "v79", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "github.com/beorn7/perks/quantile", + "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" + }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + }, + { + "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", + "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" + }, + { + "ImportPath": "github.com/prometheus/client_golang/prometheus", + "Comment": "v0.8.0-83-ge7e9030", + "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" + }, + { + "ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp", + "Comment": "v0.8.0-83-ge7e9030", + "Rev": "e7e903064f5e9eb5da98208bae10b475d4db0f8c" + }, + { + "ImportPath": "github.com/prometheus/client_model/go", + "Comment": "model-0.0.2-12-gfa8ad6f", + "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" + }, + { + "ImportPath": "github.com/prometheus/common/expfmt", + "Rev": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207" + }, + { + "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", + "Rev": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207" + }, + { + "ImportPath": "github.com/prometheus/common/model", + "Rev": "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207" + }, + { + "ImportPath": "github.com/prometheus/procfs", + "Rev": "65c1f6f8f0fc1e2185eb9863a3bc751496404259" + }, + { + "ImportPath": "github.com/prometheus/procfs/xfs", + "Rev": "65c1f6f8f0fc1e2185eb9863a3bc751496404259" + } + ] +} diff --git a/images/404-server/Godeps/Readme b/images/404-server/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- /dev/null +++ b/images/404-server/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/images/404-server/Makefile b/images/404-server/Makefile new file mode 100644 index 000000000..e292c235c --- /dev/null +++ b/images/404-server/Makefile @@ -0,0 +1,44 @@ +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the default backend binary or image for amd64, arm, arm64 and ppc64le +# +# Usage: +# [PREFIX=gcr.io/google_containers/defaultbackend] [ARCH=amd64] [TAG=1.1] make (server|container|push) + + +all: push + +TAG=1.4 +PREFIX?=gcr.io/google_containers/defaultbackend +ARCH?=amd64 + +server: server.go + CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) GOARM=6 go build -a -installsuffix cgo -ldflags '-w -s' -o server + +container: server + docker build --pull -t $(PREFIX)-$(ARCH):$(TAG) . 
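+ +# For example, `make container ARCH=arm64` builds +# gcr.io/google_containers/defaultbackend-arm64:1.4 with the default +# PREFIX and TAG above.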
+ +push: container + gcloud docker -- push $(PREFIX)-$(ARCH):$(TAG) + +push-legacy: container +ifeq ($(ARCH),amd64) + # Backward compatibility. TODO: deprecate this image tag + docker tag -f $(PREFIX)-$(ARCH):$(TAG) $(PREFIX):$(TAG) + gcloud docker -- push $(PREFIX):$(TAG) +endif + +clean: + rm -f server diff --git a/images/404-server/OWNERS b/images/404-server/OWNERS new file mode 100644 index 000000000..937f1d223 --- /dev/null +++ b/images/404-server/OWNERS @@ -0,0 +1,8 @@ +approvers: +- bprashanth +- luxas +- mikedanese +reviewers: +- bprashanth +- luxas +- mikedanese diff --git a/images/404-server/README.md b/images/404-server/README.md new file mode 100644 index 000000000..e281a138a --- /dev/null +++ b/images/404-server/README.md @@ -0,0 +1,33 @@ +# 404-server (default backend) + +404-server is a simple webserver that satisfies the Ingress default backend contract, which means it has to do two things: + + 1. Serve a 404 page at `/` + 2. Serve a 200 on `/healthz` + +## How to release + +The `404-server` Makefile supports multiple architectures, which means it can easily cross-compile and build a Docker image. +If you are releasing a new version, please bump the `TAG` value in the `Makefile` before building the images. + +How to build and push all images: +``` +# Build for linux/amd64 (default) +$ make push +$ make push ARCH=amd64 +# ---> gcr.io/google_containers/defaultbackend-amd64:TAG + +$ make push-legacy ARCH=amd64 +# ---> gcr.io/google_containers/defaultbackend:TAG (image with backwards compatible naming) + +$ make push ARCH=arm +# ---> gcr.io/google_containers/defaultbackend-arm:TAG + +$ make push ARCH=arm64 +# ---> gcr.io/google_containers/defaultbackend-arm64:TAG + +$ make push ARCH=ppc64le +# ---> gcr.io/google_containers/defaultbackend-ppc64le:TAG +``` + +Of course, if you don't want to push the images, just run `make container`. diff --git a/images/404-server/metrics.go b/images/404-server/metrics.go new file mode 100644 index 000000000..1ca3b2ad8 --- /dev/null +++ b/images/404-server/metrics.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +// Collect and display prometheus metrics + +package main + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + namespace = "default_http_backend" + subsystem = "http" +) + +var ( + requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_count_total", + Help: "Counter of HTTP requests made.", + }, []string{"proto"}) + + requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "request_duration_milliseconds", + Help: "Histogram of the time (in milliseconds) each request took.", + Buckets: append([]float64{.001, .003}, prometheus.DefBuckets...), + }, []string{"proto"}) +) diff --git a/images/404-server/server.go b/images/404-server/server.go new file mode 100644 index 000000000..615a7093b --- /dev/null +++ b/images/404-server/server.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// A webserver that only serves a 404 page. Used as a default backend. +package main + +import ( + "flag" + "fmt" + "net/http" + "os" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var port = flag.Int("port", 8080, "Port number to serve default backend 404 page.") + +func init() { + // Register the summary and the histogram with Prometheus's default registry. + prometheus.MustRegister(requestCount) + prometheus.MustRegister(requestDuration) +} + +func main() { + flag.Parse() + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + w.WriteHeader(http.StatusNotFound) + fmt.Fprint(w, "default backend - 404") + + duration := time.Now().Sub(start).Seconds() * 1e3 + + proto := strconv.Itoa(r.ProtoMajor) + proto = proto + "." 
+ strconv.Itoa(r.ProtoMinor) + + requestCount.WithLabelValues(proto).Inc() + requestDuration.WithLabelValues(proto).Observe(duration) + }) + http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, "ok") + }) + http.Handle("/metrics", promhttp.Handler()) + // TODO: Use .Shutdown from Go 1.8 + err := http.ListenAndServe(fmt.Sprintf(":%d", *port), nil) + if err != nil { + fmt.Fprintf(os.Stderr, "could not start http server: %s\n", err) + os.Exit(1) + } +} diff --git a/images/404-server/vendor/github.com/beorn7/perks/LICENSE b/images/404-server/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000..339177be6 --- /dev/null +++ b/images/404-server/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
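A quick way to exercise the new default backend (`images/404-server` above) locally is a sketch like the following; it assumes a local Go toolchain and the default `-port=8080`, and runs both `server.go` and `metrics.go` since the metric definitions live in the latter:

```console
$ go run server.go metrics.go &
$ curl http://localhost:8080/healthz
ok
$ curl http://localhost:8080/
default backend - 404
$ curl -s http://localhost:8080/metrics | grep default_http_backend_http
```

The last command filters for the `default_http_backend_http_request_count_total` and `default_http_backend_http_request_duration_milliseconds` series registered in `metrics.go`.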
diff --git a/images/404-server/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/images/404-server/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 000000000..1602287d7 --- /dev/null +++ b/images/404-server/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 
+8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 
+12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/images/404-server/vendor/github.com/beorn7/perks/quantile/stream.go b/images/404-server/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..587b1fc5b --- /dev/null +++ b/images/404-server/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). 
+// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targets map[float64]float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile + } else { + f = (2 * epsilon * (s.n - r)) / (1 - quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(float64(l) * q) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/AUTHORS b/images/404-server/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/images/404-server/vendor/github.com/golang/protobuf/CONTRIBUTORS b/images/404-server/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/images/404-server/vendor/github.com/golang/protobuf/LICENSE b/images/404-server/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 000000000..1b1b1921e --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/Makefile b/images/404-server/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 000000000..e2e0651a9 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto + make diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/clone.go b/images/404-server/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 000000000..e392575b3 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,229 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/decode.go b/images/404-server/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 000000000..aa207298f --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,970 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
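+// The bytes are assembled in little-endian order; for example, the
+// float32 value 1.0 (bit pattern 0x3f800000) appears on the wire as
+// 0x00 0x00 0x80 0x3f.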
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
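+// Varint, fixed64, length-delimited and fixed32 items are consumed in
+// one step; a start-group tag causes its fields to be skipped
+// recursively until the matching end-group tag is found.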
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
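+// is_group indicates that the message is a group, terminated by a
+// WireEndGroup tag rather than delimited by a length prefix. Required
+// fields with tags 1-64 are tracked in a bitmap so that missing ones
+// can be detected when decoding finishes.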
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. 
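+// At one byte per bool, four per uint32 and eight per uint64, the pools
+// below occupy 16, 32 and 32 bytes respectively, so one allocation
+// serves several decoded fields.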
+const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
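+// A packed repeated field is one length-delimited payload of
+// concatenated varints; for example, the values [3, 270] arrive as the
+// length byte 0x03 followed by the payload bytes 0x03 0x8e 0x02.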
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. 
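+ // For example, a varint-encoded key has tag byte 1<<3|WireVarint = 0x08,
+ // and a length-delimited value has tag byte 2<<3|WireBytes = 0x12.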
+ tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/encode.go b/images/404-server/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 000000000..68b9b30cf --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1355 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. 
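+// For example, EncodeVarint(300) returns []byte{0xac, 0x02}: the low
+// seven bits (0x2c) carry the 0x80 continuation flag, and the remaining
+// bits follow as 0x02.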
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. 
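+// Messages whose generated code (or hand-written implementation)
+// provides its own wire encoding satisfy this interface; Marshal,
+// Buffer.Marshal and Size all delegate to it when it is present.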
+type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) 
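+ // p.tagcode is the field's tag and wire type, pre-encoded as a varint
+ // when the field's Properties were built; valEnc then writes the value
+ // in the wire encoding selected for this field.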
+ p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
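+ // An unpacked repeated field repeats the tag before every element
+ // (contrast with the packed encoders below, which emit it once).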
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + if err := encodeExtensions(exts); err != nil { + return err + } + v, _ := exts.extensionsRead() + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. 
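+ // With zero or one extensions there is no ordering to enforce, so the
+ // key collection and sort below are skipped.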
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns a new reflect.Value matching the map's value type,
+// and a structPointer suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. 
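+ // (A four-byte varint covers lengths up to 1<<28-1; the copies above
+ // realign the message when the actual length encodes to fewer or more
+ // bytes than the four reserved.)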
+ o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/equal.go b/images/404-server/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 000000000..2ed1cf596 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. 
If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. 
+ n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/extensions.go b/images/404-server/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..6b9b36374 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,586 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. 
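For orientation, here is a minimal sketch of what generated code for an extendable proto2 message looks like, i.e. a type that satisfies the extendableProto interface above. All names are hypothetical, and real generated code carries field declarations and protobuf tags as well.

```go
package main

import "github.com/golang/protobuf/proto"

// MyMessage stands in for a generated proto2 message declared with
// `extensions 100 to max;`. Embedding XXX_InternalExtensions promotes
// the unexported extensionsRead/extensionsWrite methods that the
// extendableProto interface requires.
type MyMessage struct {
	proto.XXX_InternalExtensions `json:"-"`
	XXX_unrecognized             []byte `json:"-"`
}

func (m *MyMessage) Reset()         { *m = MyMessage{} }
func (m *MyMessage) String() string { return proto.CompactTextString(m) }
func (*MyMessage) ProtoMessage()    {}

// ExtensionRangeArray reports the field numbers reserved for extensions;
// 536870911 (2^29 - 1) is the protobuf maximum field number.
func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
	return []proto.ExtensionRange{{Start: 100, End: 536870911}}
}
```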
+func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
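And this is roughly the descriptor a generated file would declare for an extension of that message, say `extend MyMessage { optional string my_ext = 123; }`, continuing the hypothetical names from the sketch above:

```go
// E_MyExt describes the extension; generated code exports one such
// variable per extend clause. Field 123 falls inside MyMessage's
// declared extension range of 100 to max.
var E_MyExt = &proto.ExtensionDesc{
	ExtendedType:  (*MyMessage)(nil), // nil pointer to the extended message type
	ExtensionType: (*string)(nil),    // nil pointer to the extension's Go type
	Field:         123,
	Name:          "example.my_ext",
	Tag:           "bytes,123,opt,name=my_ext",
}
```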
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. 
+ x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. 
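Putting the pieces together, a short usage sketch of the public accessors defined in this file, reusing the hypothetical MyMessage and E_MyExt from the sketches above (fragment; assumes the usual fmt, log, and proto imports):

```go
msg := &MyMessage{}

// SetExtension type-checks the value against E_MyExt.ExtensionType,
// so it must be a *string here.
if err := proto.SetExtension(msg, E_MyExt, proto.String("hello")); err != nil {
	log.Fatal(err)
}

if proto.HasExtension(msg, E_MyExt) {
	v, err := proto.GetExtension(msg, E_MyExt) // interface{} holding a *string
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // "hello"
}

// Back to unset; GetExtension would now return ErrMissingExtension,
// since my_ext declares no default value.
proto.ClearExtension(msg, E_MyExt)
```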
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. 
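GetExtensions pairs naturally with ExtensionDescs for bulk access. Note that descriptors recovered for unregistered extensions carry only the field number and cannot be decoded, so a sketch like this assumes every present extension is registered:

```go
descs, err := proto.ExtensionDescs(msg)
if err != nil {
	log.Fatal(err)
}
vals, err := proto.GetExtensions(msg, descs) // one entry per descriptor; nil if absent
if err != nil {
	log.Fatal(err)
}
for i, d := range descs {
	fmt.Printf("field %d: %v\n", d.Field, vals[i])
}
```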
+func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/lib.go b/images/404-server/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 000000000..ac4ddbc07 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,898 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+  - There are getters that return a field's value if set,
+    and return the field's default value if unset.
+    The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+    All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+    That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+    msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+    have them. They have the form Default_StructName_FieldName.
+    Because the getter methods handle defaulted values,
+    direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+    Enum values are prefixed by the enclosing message's name, or by the
+    enum's type name if it is a top-level enum. Enum types have a String
+    method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+    the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+    followed by an underscore-delimited list of the nested messages
+    that contain it (if any) followed by the CamelCased name of the
+    extension field itself. HasExtension, ClearExtension, GetExtension
+    and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+    with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Getters are only generated for message and oneof fields.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + 
} + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
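The Buffer type above also carries Marshal and Unmarshal methods (defined elsewhere in this package, outside this hunk), which is what makes reuse worthwhile. A small sketch, with msgs and send as hypothetical stand-ins for a message slice and a transport hook:

```go
var buf proto.Buffer // the zero value is ready to use
for _, m := range msgs {
	buf.Reset() // reuse the same backing slice across iterations
	if err := buf.Marshal(m); err != nil {
		log.Fatal(err)
	}
	// Bytes aliases the internal slice; copy it if it must outlive Reset.
	send(buf.Bytes())
}
```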
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
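These two helpers are the glue behind the enum methods shown in the package comment. One subtlety: the package-comment example calls UnmarshalJSONEnum with two arguments, but the signature above takes the enum's name as a third parameter for error reporting, and generated code passes it explicitly:

```go
type FOO int32

var (
	FOO_name  = map[int32]string{17: "X"}
	FOO_value = map[string]int32{"X": 17}
)

func (x FOO) String() string { return proto.EnumName(FOO_name, int32(x)) }

func (x *FOO) UnmarshalJSON(data []byte) error {
	// "FOO" is the enum name used in error messages.
	value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
	if err != nil {
		return err
	}
	*x = FOO(value)
	return nil
}
```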
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
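To make the defaults machinery concrete: given a generated type with a declared default, such as the package comment's Test message with `optional int32 type = 2 [default=77]`, SetDefaults fills in exactly the unset scalar fields. A hedged fragment, with pb.Test standing in for that hypothetical generated type:

```go
t := &pb.Test{Label: proto.String("x")} // Type deliberately left nil
proto.SetDefaults(t)
fmt.Println(*t.Type) // 77: the proto-declared default was allocated and set
// Fields that are already set, and fields with no declared default,
// are left untouched; non-nil sub-messages are visited recursively.
```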
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/message_set.go b/images/404-server/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 000000000..fd982decd --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. 
+ */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. 
+ ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. 
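+	// A full implementation would have to undo MarshalMessageSetJSON above:
+	// split the object into `"[name]": value` members, resolve each name via
+	// messageSetMap, unmarshal the JSON value into the registered type, and
+	// re-encode the result into exts in wire format.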
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..fb512e2e1 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. 
+// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
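+// For a float32 field this is the raw IEEE-754 bit pattern (math.Float32bits),
+// the inverse of the math.Float32frombits conversion done in word32Val_Set above.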
+func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..6b5567d47 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. 
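+// Like every accessor in this file, it computes the address as the struct's
+// base pointer plus the field's byte offset:
+// unsafe.Pointer(uintptr(p) + uintptr(f)).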
+func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. 
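+// structPointer_Word32Slice below reinterprets the field's memory directly as
+// []uint32; that also works for []int32, []float32, and []enum fields, since
+// all of them share the same element size and slice header layout.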
+type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/properties.go b/images/404-server/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..ec2289c00 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,872 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. 
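+// A StructProperties is built once per message type by GetProperties and then
+// cached in propertiesMap, so the tag parsing and reflection below happen only
+// on first use of each type.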
+type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
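+// For example: "bytes,1,opt,name=foo,json=fooBar,proto3".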
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeVarint
+		p.valDec = (*Buffer).DecodeVarint
+		p.valSize = sizeVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+		p.valEnc = (*Buffer).EncodeFixed32
+		p.valDec = (*Buffer).DecodeFixed32
+		p.valSize = sizeFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+		p.valEnc = (*Buffer).EncodeFixed64
+		p.valDec = (*Buffer).DecodeFixed64
+		p.valSize = sizeFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeZigzag32
+		p.valDec = (*Buffer).DecodeZigzag32
+		p.valSize = sizeZigzag32
+	case "zigzag64":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeZigzag64
+		p.valDec = (*Buffer).DecodeZigzag64
+		p.valSize = sizeZigzag64
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break
+			}
+		}
+	}
+}
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+	fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. 
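+			// Instead the wrapper interface field carries a tag of the form
+			// `protobuf_oneof:"union"`, whose value is the oneof's original name.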
+ p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
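+//
+// Generated code registers each message from an init function, roughly:
+//
+//	func init() {
+//		proto.RegisterType((*MyMsg)(nil), "mypkg.MyMsg")
+//	}
+//
+// (MyMsg and mypkg are placeholder names.) MessageName and MessageType below
+// answer lookups in both directions using these two maps.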
+var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/text.go b/images/404-server/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 000000000..965876bf0 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,854 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. 
+// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
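+			// For example, a map<string, int32> entry {"k": 1} renders as:
+			//   field_name: <
+			//     key: "k"
+			//     value: 1
+			//   >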
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
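+// In non-compact mode the output begins with a "/* N unknown bytes */"
+// comment inside the surrounding angle brackets.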
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
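+		// For example, a newline byte is written as the two characters `\n`,
+		// and a non-printable byte such as 0x01 as the octal escape `\001`.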
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
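+		// For example, a repeated extension with three elements yields
+		// three separate "[name]: value" lines.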
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. 
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/images/404-server/vendor/github.com/golang/protobuf/proto/text_parser.go b/images/404-server/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 000000000..61f83c1e1 --- /dev/null +++ b/images/404-server/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,895 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
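+//
+// As a rough illustration (field names here are hypothetical), the
+// format being parsed looks like:
+//
+//	name: "John Doe"
+//	id: 1234
+//	phones: <
+//	  number: "555-4321"
+//	  type: HOME
+//	>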
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} 
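+
+// As an example of tokenization: the input `foo:"bar"` yields the tokens
+// "foo", ":" and "\"bar\"" (the last with unquoted form "bar").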
+ +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
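+		// For example, the adjacent tokens `"foo"` and `"bar"` are merged
+		// into one token whose unquoted value is "foobar".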
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
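+			// For example, "type.googleapis.com/google.protobuf.Duration"
+			// resolves to the registered message name "google.protobuf.Duration".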
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
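+// For example, `foo: 1; bar: 2`, `foo: 1, bar: 2` and `foo: 1 bar: 2`
+// all parse identically.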
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 000000000..13f15dfce --- /dev/null +++ b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013 Matt T. Proud + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 000000000..66d9b5458 --- /dev/null +++ b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + headerBuf := make([]byte, binary.MaxVarintLen32) + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. 
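+		// (For example, a 300-byte message carries the varint prefix
+		// bytes 0xAC 0x02.)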
+ if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 000000000..c318385cb --- /dev/null +++ b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 000000000..4b76ea9a1 --- /dev/null +++ b/images/404-server/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. 
This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. +func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + buf := make([]byte, binary.MaxVarintLen32) + encodedLength := binary.PutUvarint(buf, uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/LICENSE b/images/404-server/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/NOTICE b/images/404-server/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 000000000..dd878a30e --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 000000000..3460f0346 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/README.md b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 000000000..44986bff0 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). 
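The ReadDelimited and WriteDelimited helpers vendored above implement the varint length-prefixed framing that lets several encoded messages of one type share a single stream. A minimal round-trip sketch, assuming the vendored import paths and using dto.MetricFamily merely as a convenient proto.Message:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Write one length-delimited record: a varint length prefix followed
	// by the marshaled message bytes.
	in := &dto.MetricFamily{Name: proto.String("http_requests_total")}
	var buf bytes.Buffer
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		log.Fatal(err)
	}

	// ReadDelimited consumes exactly one record, so further records of the
	// same type could follow on the same stream.
	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName()) // http_requests_total
}
```

WriteDelimited reports the combined count of prefix and payload bytes, which matches what ReadDelimited consumes on the way back in.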
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 000000000..623d3d83f --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. 
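The Collector contract above is small enough to implement directly when existing numbers need mirroring into Prometheus at scrape time. A sketch of a hand-written Collector; the queue metric name and the depth callback are hypothetical stand-ins for application state:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors an externally maintained value into a throw-away
// constant metric on every scrape.
type queueCollector struct {
	depthDesc *prometheus.Desc
	depth     func() float64 // hypothetical application callback
}

// Describe sends the one descriptor this collector can ever emit.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depthDesc
}

// Collect builds a fresh constant metric from the current value.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, c.depth())
}

func main() {
	prometheus.MustRegister(&queueCollector{
		depthDesc: prometheus.NewDesc(
			"queue_depth", "Current depth of the work queue.", nil, nil),
		depth: func() float64 { return 42 },
	})
}
```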
+type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 000000000..72d5256a5 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,164 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + value +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. +type CounterVec struct { + *MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. 
At least one label name must be +// provided. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.MetricVec.WithLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.MetricVec.With(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. 
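Counters come in a scalar and a vector flavor, and the vector accessors above trade error returns for panics in their With* shortcuts. A short usage sketch with hypothetical metric and label names:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A scalar counter: it only ever goes up.
	tasks := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "worker_tasks_completed_total",
		Help: "Total number of completed tasks.",
	})

	// A counter vector, partitioned by two labels.
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total number of HTTP requests by code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(tasks, httpReqs)

	tasks.Inc()
	// The With* shortcuts panic on a label-cardinality mismatch; the
	// GetMetricWith* variants return an error instead.
	httpReqs.WithLabelValues("404", "GET").Add(42)
	httpReqs.With(prometheus.Labels{"code": "200", "method": "GET"}).Inc()
	// tasks.Add(-1) would panic: a Counter rejects negative increments.
}
```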
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 000000000..1835b16f6 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,200 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. 
Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. 
If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 000000000..278969dc7 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,186 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides metrics primitives to instrument code for +// monitoring. It also offers a registry for metrics. Sub-packages allow to +// expose the registered metrics via HTTP (package promhttp) or push them to a +// Pushgateway (package push). +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. 
However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. 
With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegistry. You can use multiple registries at the +// same time to expose different metrics in different ways. You can use separate +// registries for testing purposes. +// +// Also note that the DefaultRegistry comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 000000000..18a99d5fa --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 000000000..e3b67df8a
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 000000000..9ab5a3d62
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+	Metric
+	Collector
+
+	// Set sets the Gauge to an arbitrary value.
+	Set(float64)
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+	// values.
+	Inc()
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+	// values.
+	Dec()
+	// Add adds the given value to the Gauge. (The value can be negative,
+	// resulting in a decrease of the Gauge.)
+	Add(float64)
+	// Sub subtracts the given value from the Gauge. (The value can be
+	// negative, resulting in an increase of the Gauge.)
+	Sub(float64)
+
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+	SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
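Unlike the Counter above, a Gauge may move in either direction. A brief usage sketch; the metric name is hypothetical:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "worker_queue_depth",
		Help: "Current number of items waiting in the work queue.",
	})
	prometheus.MustRegister(queueDepth)

	queueDepth.Set(12) // jump to an absolute value
	queueDepth.Inc()   // 13
	queueDepth.Sub(3)  // 10; downward movement is fine for a Gauge
	queueDepth.SetToCurrentTime()
}
```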
+func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. +func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.MetricVec.WithLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.MetricVec.With(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
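When a value is cheapest to read on demand, GaugeFunc removes the need for explicit Set calls. A sketch that samples the goroutine count at scrape time; the metric name is hypothetical:

```go
package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The callback runs inside Write on every scrape, possibly
	// concurrently, so it must be safe for concurrent use.
	g := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "app_goroutines",
			Help: "Number of goroutines, sampled at scrape time.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	)
	prometheus.MustRegister(g)
}
```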
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 000000000..f96764559 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,276 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. +func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + 
memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. 
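The default registry already carries this Go runtime collector, as the package documentation notes; with a custom registry it has to be added explicitly. A sketch, assuming the promhttp sub-package for exposition:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry starts empty; add the Go runtime collector by hand.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	// Expose only this registry's metrics (go_goroutines, go_memstats_*, ...).
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```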
+func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 000000000..f46eff6ac --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,444 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. 
The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal to 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Histogram. Histograms with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // HistogramVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Histograms with the same fully-qualified + // name. In that case, those Histograms must differ in the values of + // their ConstLabels. See the Collector examples.
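The bucket helpers defined above feed directly into the Buckets field of this struct. A short usage sketch, with arbitrary values; the metric name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Four buckets, 50 units apart: [50 100 150 200].
	fmt.Println(prometheus.LinearBuckets(50, 50, 4))

	// Six buckets doubling from 1ms: [0.001 0.002 0.004 0.008 0.016 0.032].
	fmt.Println(prometheus.ExponentialBuckets(0.001, 2, 6))

	// Wiring them into a Histogram (the +Inf bucket is added implicitly).
	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "demo_request_duration_seconds", // illustrative name
		Help:    "Request latency in seconds.",
		Buckets: prometheus.ExponentialBuckets(0.001, 2, 6),
	})
	prometheus.MustRegister(reqDur)
	reqDur.Observe(0.0042)
}
```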
+ // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. 
+ // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *MetricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Observer and not a +// Metric so that no type conversion to an Observer is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Observer and not a Metric so that no +// type conversion to an Observer is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *HistogramVec) WithLabelValues(lvs ...string) Observer { + return m.MetricVec.WithLabelValues(lvs...).(Observer) +} + +// With works as GetMetricWith, but panics where GetMetricWith would have +// returned an error.
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Observer { + return m.MetricVec.With(labels).(Observer) +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstHistogram would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/http.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 000000000..d485ce0b8 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,524 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead +// (which is not instrumented, but can be instrumented with the tooling provided +// in package promhttp). +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled).
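Taken together, the file above keeps the old serving path alive: Handler wires UninstrumentedHandler through the deprecated InstrumentHandler. A sketch of exposing metrics both the deprecated way and via the promhttp replacement that the doc comments point to (port and paths are arbitrary):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Deprecated path: instrumented with InstrumentHandler under the hood.
	http.Handle("/metrics-legacy", prometheus.Handler())

	// Recommended replacement per the deprecation notices above.
	http.Handle("/metrics", promhttp.Handler())

	log.Fatal(http.ListenAndServe(":9090", nil))
}
```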
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. 
The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." 
+ resSz := NewSummary(opts) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := computeApproximateRequestSize(r) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: 
+ return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 000000000..d4063d98f --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. 
+ Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a metric + // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels + // serve only special purposes. One is for the special case where the + // value of a label does not change during the lifetime of a process, + // e.g. if the revision of the running binary is put into a + // label. Another, more advanced purpose is if more than one Collector + // needs to collect Metrics with the same fully-qualified name. In that + // case, those Metrics must differ in the values of their + // ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics.
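BuildFQName's joining rules, as defined above, in a quick sketch (the component names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("ingress", "http", "requests_total"))
	// -> ingress_http_requests_total

	fmt.Println(prometheus.BuildFQName("", "http", "requests_total"))
	// -> http_requests_total (empty components are skipped)

	fmt.Println(prometheus.BuildFQName("ingress", "http", ""))
	// -> "" (an empty name always yields an empty string)
}
```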
+type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 000000000..b0520e85e --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,50 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. 
+type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + + Collector +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 000000000..94b2553e1 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. 
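Registering the process collector for the current process is a one-liner; in this vendored version the constructor takes an explicit pid and an optional namespace prefix (the empty namespace below is a deliberate choice, not a requirement):

```go
package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Exports process_cpu_seconds_total, process_open_fds, and friends.
	// On platforms without procfs, Collect quietly becomes a no-op.
	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))
}
```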
+func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } + + if limits, err := p.NewLimits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + } +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go new file mode 100644 index 000000000..24b31503e --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go @@ -0,0 +1,38 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + if cn && fl && hj && rf { + return &fancyDelegator{d} + } + + return d +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go new file mode 100644 index 000000000..b7743fb87 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go @@ -0,0 +1,73 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +// newDelegator handles the four different methods of upgrading a +// http.ResponseWriter to delegator. +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, ps := w.(http.Pusher) + _, rf := w.(io.ReaderFrom) + + // Check for the four most common combination of interfaces a + // http.ResponseWriter might implement. + switch { + case cn && fl && hj && rf && ps: + // All interfaces. + return &fancyPushDelegator{ + fancyDelegator: &fancyDelegator{d}, + p: &pushDelegator{d}, + } + case cn && fl && hj && rf: + // All interfaces, except http.Pusher. + return &fancyDelegator{d} + case ps: + // Just http.Pusher. + return &pushDelegator{d} + } + + return d +} + +type fancyPushDelegator struct { + p *pushDelegator + + *fancyDelegator +} + +func (f *fancyPushDelegator) Push(target string, opts *http.PushOptions) error { + return f.p.Push(target, opts) +} + +type pushDelegator struct { + *responseWriterDelegator +} + +func (f *pushDelegator) Push(target string, opts *http.PushOptions) error { + return f.ResponseWriter.(http.Pusher).Push(target, opts) +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 000000000..4c70a7af6 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,204 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. 
+// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The +// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP +// error, no error logging, and compression if requested by the client. +// +// If you want to create a Handler for the DefaultGatherer with different +// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and +// your desired HandlerOpts. +func Handler() http.Handler { + return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) +} + +// HandlerFor returns an http.Handler for the provided Gatherer. The behavior +// of the Handler is defined by the provided HandlerOpts. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf, opts.DisableCompression) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding metric family:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + // TODO(beorn7): Consider streaming serving of metrics. + }) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors.
+type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. It is recommended to at least + // log errors (by providing an ErrorLog in HandlerOpts) to not mask + // errors completely. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { + if compressionDisabled { + return writer, "" + } + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 000000000..1cf21f217 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
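Before moving on to the client-side middleware, here is a sketch of HandlerFor with non-default HandlerOpts, serving a private registry (the registry contents and log prefix are illustrative, and it assumes the vendored registry API with NewRegistry and NewGoCollector):

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Log gathering/encoding problems instead of masking them...
		ErrorLog: log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		// ...and keep serving whatever metrics could be gathered.
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```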
+ +package promhttp + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RoundTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. Partitioning of the CounterVec happens by HTTP status +// code and/or HTTP method if the respective instance label names are present +// in the CounterVec. For unpartitioned counting, use a CounterVec with +// zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go new file mode 100644 index 000000000..b51d91052 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go @@ -0,0 +1,142 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" +) + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately bucketed Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Note that partitioning of Histograms +// is expensive and should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage.
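The three client middlewares compose by nesting, innermost first. A sketch of a fully instrumented http.Client (metric names and label choices are illustrative; "code" and "method" are the only label names the wrappers accept):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "demo_client_in_flight_requests",
		Help: "In-flight requests of the instrumented client.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "demo_client_requests_total",
		Help: "Requests made by the instrumented client.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "demo_client_request_duration_seconds",
		Help:    "Request latencies of the instrumented client.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
	prometheus.MustRegister(inFlight, counter, duration)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport))),
	}
	if resp, err := client.Get("http://example.com"); err == nil {
		resp.Body.Close()
	}
}
```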
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Note that partitioning of Histograms
+// is expensive and should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 000000000..ac419e555
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,505 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler.
It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided +// http.Handler to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. Partitioning of the CounterVec happens by HTTP status +// code and/or HTTP method if the respective instance label names are present +// in the CounterVec. For unpartitioned counting, use a CounterVec with +// zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two labels. The only allowed label names are "code" and "method". 
The +// function panics if any other instance labels are provided. The Observe +// method of the Observer in the ObserverVec is called with the request +// duration in seconds. Partitioning happens by HTTP status code and/or HTTP +// method if the respective instance label names are present in the +// ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the request size in bytes. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the response size in bytes. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. 
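A sketch of chaining the server-side middlewares above around a plain handler; the metric names, label choices, and the :8080 listen address are assumptions for illustration, not part of the library.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds", // assumed name
			Help:    "Request latency partitioned by method.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total", // assumed name
			Help: "Requests partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	requestSize := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_size_bytes", // assumed name
			Help:    "Approximate request sizes.",
			Buckets: prometheus.ExponentialBuckets(100, 10, 5),
		},
		[]string{}, // zero labels: unpartitioned observations
	)
	prometheus.MustRegister(duration, counter, requestSize)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Outermost to innermost: duration -> counter -> request size -> handler.
	chain := promhttp.InstrumentHandlerDuration(duration,
		promhttp.InstrumentHandlerCounter(counter,
			promhttp.InstrumentHandlerRequestSize(requestSize, hello),
		),
	)
	http.Handle("/", chain)
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}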
+// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + pm dto.Metric + ) + + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil { + return + } + if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil { + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + return + } + panic("previously set label not found – this must never happen") + } + if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil { + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString { + continue + } + if name == "code" || name == "method" { + continue + } + panic("metric partitioned with non-supported labels") + } + code = true + method = true + return + } + panic("metric partitioned with non-supported labels") +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. +var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
+
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200, 0:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		return strconv.Itoa(s)
+	}
+}
+
+type delegator interface {
+	Status() int
+	Written() int64
+
+	http.ResponseWriter
+}
+
+type responseWriterDelegator struct {
+	http.ResponseWriter
+
+	handler, method    string
+	status             int
+	written            int64
+	wroteHeader        bool
+	observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+	return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+	return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+	r.status = code
+	r.wroteHeader = true
+	r.ResponseWriter.WriteHeader(code)
+	if r.observeWriteHeader != nil {
+		r.observeWriteHeader(code)
+	}
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.Write(b)
+	r.written += int64(n)
+	return n, err
+}
+
+type fancyDelegator struct {
+	*responseWriterDelegator
+}
+
+func (r *fancyDelegator) CloseNotify() <-chan bool {
+	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (r *fancyDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return r.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (r *fancyDelegator) Flush() {
+	r.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (r *fancyDelegator) ReadFrom(re io.Reader) (int64, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+	r.written += n
+	return n, err
+}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 000000000..8c6b5bd8e
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,755 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"sort"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+const (
+	// Capacity for the channel to collect metrics and descriptors.
+	capMetricChan = 1000
+	capDescChan   = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach to keep default instances
+// as global state mirrors the approach of other packages in the Go standard
+// library. Note that there are caveats. Change the variables with caution and
+// only if you understand the consequences. Users who want to avoid global
+// state altogether should not use the convenience functions and act on custom
+// instances instead.
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(os.Getpid(), ""))
+	MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as the type for registration purposes (rather than the Registry
+// type directly). That way, they are free to use custom Registerer
+// implementations (e.g. for testing purposes).
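As the comment above suggests, code that wants to avoid the global default registry can work against its own instance, which serves as both Registerer and Gatherer. A minimal sketch (handler path, port, and metric name are assumptions), using promhttp.HandlerFor for exposition:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A private registry: no process or Go runtime collectors pre-registered.
	reg := prometheus.NewRegistry()

	jobs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total", // assumed name
		Help: "Total processed jobs.",
	})
	reg.MustRegister(jobs) // the Registerer half of *Registry

	jobs.Inc()

	// The Gatherer half: expose exactly what this registry holds.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}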
+type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // It is in general not safe to register the same Collector multiple + // times concurrently. + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. 
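The AlreadyRegisteredError mentioned in the Register contract above (the type itself is defined a little further below) enables an idempotent registration pattern; a sketch with an illustrative counter:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests_total", // assumed name
		Help: "Handled requests.",
	})
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// A Collector with the same descriptors won the race;
			// keep using the one that is already registered.
			c = are.ExistingCollector.(prometheus.Counter)
		} else {
			panic(err)
		}
	}
	c.Inc()
}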
+func Unregister(c Collector) bool {
+	return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+	return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector
+// to be registered has already been registered before, or a different
+// Collector that collects the same metrics has been registered before.
+// Registration fails in that case, but you can detect from the kind of error
+// what has happened. The error contains fields for the existing Collector and
+// the (rejected) new Collector that equals the existing one. This can be used
+// to find out if an equal Collector has been registered before and switch
+// over to using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error if len(errs) is 1. In all other cases, it returns the
+// MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and
+// gathers them into MetricFamilies for exposition. It implements both
+// Registerer and Gatherer. The zero value is not usable. Create instances
+// with NewRegistry or NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID. (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) + wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. 
+ if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + errs = append(errs, fmt.Errorf( + "error collecting metric %v: %s", desc, err, + )) + continue + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + errs = append(errs, fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + )) + continue + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + )) + continue + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + errs = append(errs, fmt.Errorf( + "empty metric collected: %s", dtoMetric, + )) + continue + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + if r.pedanticChecksEnabled { + // Is the desc registered at all? 
+			if _, exist := registeredDescIDs[desc.id]; !exist {
+				errs = append(errs, fmt.Errorf(
+					"collected metric %s %s with unregistered descriptor %s",
+					metricFamily.GetName(), dtoMetric, desc,
+				))
+				continue
+			}
+			if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+				errs = append(errs, err)
+				continue
+			}
+		}
+		metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	}
+	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies
+// between the gathered MetricFamilies are reported as errors by the Gather
+// method, and inconsistent Metrics are dropped. Invalid parts of the
+// MetricFamilies (e.g. syntactically invalid metric or label names) will go
+// undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		metricFamiliesByName = map[string]*dto.MetricFamily{}
+		metricHashes         = map[uint64]struct{}{}
+		dimHashes            = map[string]uint64{}
+		errs                 MultiError // The collected errors to return in the end.
+	)
+
+	for i, g := range gs {
+		mfs, err := g.Gather()
+		if err != nil {
+			if multiErr, ok := err.(MultiError); ok {
+				for _, err := range multiErr {
+					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+				}
+			} else {
+				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+			}
+		}
+		for _, mf := range mfs {
+			existingMF, exists := metricFamiliesByName[mf.GetName()]
+			if exists {
+				if existingMF.GetHelp() != mf.GetHelp() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has help %q but should have %q",
+						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+					))
+					continue
+				}
+				if existingMF.GetType() != mf.GetType() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has type %s but should have %s",
+						mf.GetName(), mf.GetType(), existingMF.GetType(),
+					))
+					continue
+				}
+			} else {
+				existingMF = &dto.MetricFamily{}
+				existingMF.Name = mf.Name
+				existingMF.Help = mf.Help
+				existingMF.Type = mf.Type
+				metricFamiliesByName[mf.GetName()] = existingMF
+			}
+			for _, m := range mf.Metric {
+				if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				existingMF.Metric = append(existingMF.Metric, m)
+			}
+		}
+	}
+	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
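A sketch of the registry-merging use of Gatherers described above; the gauge, handler path, and port are illustrative assumptions:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "worker_pool_size", // assumed name
		Help: "A gauge kept outside the default registry.",
	}))

	// Serve the union of the default registry and the custom one.
	// Duplicates or inconsistencies surface as errors at gather time,
	// with the first occurrence in slice order winning.
	merged := prometheus.Gatherers{prometheus.DefaultGatherer, reg}
	http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}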
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+	return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are inconsistent.
+		// However, we have to deal with it, as people might use
+		// custom collectors or metric family injection to create
+		// inconsistent metrics. So let's simply compare the number
+		// of labels in this case. That will still yield
+		// reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
+	for n, lp := range s[i].Label {
+		vi := lp.GetValue()
+		vj := s[j].Label[n].GetValue()
+		if vi != vj {
+			return vi < vj
+		}
+	}
+
+	// We should never arrive here. Multiple metrics with the same
+	// label set in the same scrape will lead to undefined ingestion
+	// behavior. However, as above, we have to provide stable sorting
+	// here, even for inconsistent metrics. So sort equal metrics
+	// by their timestamp, with missing timestamps (implying "now")
+	// coming last.
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an
+// error is returned. If not, it is added to metricHashes. The provided
+// dimHashes maps MetricFamily names to their dimHash (hashed sorted label
+// names). If dimHashes doesn't yet contain a hash for the provided
+// MetricFamily, it is added. Otherwise, an error is returned if the existing
+// dimHash is not equal to the calculated dimHash.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+	dimHashes map[string]uint64,
+) error {
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %s %s is not a %s",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew() + h = hashAdd(h, metricFamily.GetName()) + h = hashAddByte(h, separatorByte) + dh := hashNew() + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 000000000..1c65e25ec --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,543 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. 
+const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. +// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. The default Summary will have no quantiles then. +var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Summaries with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. 
+	//
+	// If the value of a label never changes (not even between binaries),
+	// that label most likely should not be a label at all (but part of the
+	// metric name).
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error. If Objectives[q] = e, then the value reported for q
+	// will be the φ-quantile value for some φ between q-e and q+e. The
+	// default value is DefObjectives. It is used if Objectives is left at
+	// its zero value (i.e. nil). To create a Summary without Objectives,
+	// set it to an empty map (i.e. map[float64]float64{}).
+	//
+	// Deprecated: Note that the current value of DefObjectives is
+	// deprecated. It will be replaced by an empty map in v0.10 of the
+	// library. Please explicitly set Objectives to the desired value.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Must be positive. The default value is DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/bmizerany/perks/quantile").
+	BufCap uint32
+}
+
+// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see the code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
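A sketch of constructing a Summary with explicit Objectives, since the doc above deprecates relying on the DefObjectives default; the metric name, quantile choices, and MaxAge are illustrative assumptions:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "rpc_duration_seconds", // assumed name
		Help: "RPC latency.",
		// Per the Objectives doc above: quantile rank -> allowed absolute error.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		MaxAge:     5 * time.Minute, // observations older than this age out
	})
	prometheus.MustRegister(latency)

	start := time.Now()
	// ... the work being timed ...
	latency.Observe(time.Since(start).Seconds())
}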
+func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. 
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+	*MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in MetricVec.
+// The difference is that this method returns an Observer and not a Metric so
+// that no type conversion to an Observer is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Observer and not a Metric so that
+// no type conversion to an Observer is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
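A sketch of the SummaryVec defined above, partitioned by a single illustrative "service" label; it contrasts the error-returning accessors with the panicking shortcuts documented next:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "task_duration_seconds", // assumed name
			Help:       "Task latency partitioned by service.",
			Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
		},
		[]string{"service"},
	)
	prometheus.MustRegister(latency)

	// The Get* variants return an error on label mismatch...
	if obs, err := latency.GetMetricWithLabelValues("billing"); err == nil {
		obs.Observe(0.42)
	}
	// ...while the With* shortcuts below panic instead.
	latency.WithLabelValues("billing").Observe(0.13)
}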
By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	return m.MetricVec.WithLabelValues(lvs...).(Observer)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Observer {
+	return m.MetricVec.With(labels).(Observer)
+}
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if len(desc.variableLabels) != len(labelValues) {
+		return nil, errInconsistentCardinality
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 000000000..12b65699b
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+	begin    time.Time
+	observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+//    func TimeMe() {
+//        timer := NewTimer(myHistogram)
+//        defer timer.ObserveDuration()
+//        // Do actual work.
+//    }
+func NewTimer(o Observer) *Timer {
+	return &Timer{
+		begin:    time.Now(),
+		observer: o,
+	}
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+func (t *Timer) ObserveDuration() {
+	if t.observer != nil {
+		t.observer.Observe(time.Since(t.begin).Seconds())
+	}
+}
diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 000000000..065501d38
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that
+// no type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+//
+// Deprecated: The Untyped type is deprecated because it doesn't make sense in
+// direct instrumentation. If you need to mirror an external metric of unknown
+// type (usually while writing exporters), use MustNewConstMetric to create an
+// untyped metric instance on the fly.
+type Untyped interface {
+	Metric
+	Collector
+
+	// Set sets the Untyped metric to an arbitrary value.
+	Set(float64)
+	// Inc increments the Untyped metric by 1.
+	Inc()
+	// Dec decrements the Untyped metric by 1.
+	Dec()
+	// Add adds the given value to the Untyped metric. (The value can be
+	// negative, resulting in a decrease.)
+	Add(float64)
+	// Sub subtracts the given value from the Untyped metric. (The value can
+	// be negative, resulting in an increase.)
+	Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
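+//
+// A minimal usage sketch (the metric name and value here are illustrative,
+// not part of this package):
+//
+//    u := NewUntyped(UntypedOpts{
+//        Name: "mirrored_external_value",
+//        Help: "A value mirrored from an external system.",
+//    })
+//    u.Set(42)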
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+	return newValue(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+	*MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &UntypedVec{
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newValue(desc, UntypedValue, 0, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Untyped), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Untyped), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+	return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+	Metric
+	Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
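+//
+// A hedged sketch of typical use, assuming a caller-provided, concurrency-safe
+// readExternalValue function (not part of this package):
+//
+//    f := NewUntypedFunc(UntypedOpts{
+//        Name: "external_reading",
+//        Help: "Reading fetched anew on every collection.",
+//    }, readExternalValue)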
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/value.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 000000000..ff75ce585 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,239 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "math" + "sort" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + valType ValueType + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. 
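+// NewUntyped above shows a typical call: newValue(desc, UntypedValue, 0) with
+// no label values, matching a Desc created with no variable labels.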
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) SetToCurrentTime() { + v.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. 
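+//
+// A sketch of typical use inside a custom Collector's Collect method; the
+// myCollector type, its desc field, and its queueLength method are assumed
+// here for illustration:
+//
+//    func (c *myCollector) Collect(ch chan<- Metric) {
+//        ch <- MustNewConstMetric(c.desc, GaugeValue, float64(c.queueLength()))
+//    }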
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 000000000..7f3eef9a4 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,404 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects the children. + children map[uint64][]metricWithLabelValues + desc *Desc + + newMetric func(labelValues ...string) Metric + hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized MetricVec. The concrete value is +// returned for embedding into another struct. 
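+// NewSummaryVec and NewUntypedVec above show the pattern: they embed the
+// returned *MetricVec in a named struct and supply a newMetric function that
+// creates one metric per combination of label values.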
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+	return &MetricVec{
+		children:    map[uint64][]metricWithLabelValues{},
+		desc:        desc,
+		newMetric:   newMetric,
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+	values []string
+	metric Metric
+}
+
+// Describe implements Collector. It sends exactly one Desc to the provided
+// channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+	ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	for _, metrics := range m.children {
+		for _, metric := range metrics {
+			ch <- metric.metric
+		}
+	}
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.getOrCreateMetricWithLabels(h, labels), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs.
The method allows neat syntax like:
+//     httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+	metric, err := m.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+//     httpReqs.With(Labels{"status": "404", "method": "POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+	metric, err := m.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+	return m.deleteByHashWithLabelValues(h, lvs)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+	metrics, ok := m.children[h]
+	if !ok {
+		return false
+	}
+
+	i := m.findMetricWithLabelValues(metrics, lvs)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.children, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+	metrics, ok := m.children[h]
+	if !ok {
+		return false
+	}
+	i := m.findMetricWithLabels(metrics, labels)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.children, h)
+	}
+	return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for h := range m.children {
+		delete(m.children, h)
+	}
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+	if len(vals) != len(m.desc.variableLabels) {
+		return 0, errInconsistentCardinality
+	}
+	h := hashNew()
+	for _, val := range vals {
+		h = m.hashAdd(h, val)
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+	if len(labels) != len(m.desc.variableLabels) {
+		return 0, errInconsistentCardinality
+	}
+	h := hashNew()
+	for _, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if !ok {
+			return 0, fmt.Errorf("label name %q missing in label map", label)
+		}
+		h = m.hashAdd(h, val)
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
+// values or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabelValues(hash, lvs)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithLabelValues(hash, lvs)
+	if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+		copiedLVs := make([]string, len(lvs))
+		copy(copiedLVs, lvs)
+		metric = m.newMetric(copiedLVs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
+// creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabels(hash, labels)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithLabels(hash, labels)
+	if !ok {
+		lvs := m.extractLabelValues(labels)
+		metric = m.newMetric(lvs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
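+// Together with matchLabelValues below, this is what disambiguates metrics
+// whose label values happen to hash into the same bucket.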
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { + for i, metric := range metrics { + if m.matchLabelValues(metric.values, lvs) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { + for i, metric := range metrics { + if m.matchLabels(metric.values, labels) { + return i + } + } + return len(metrics) +} + +func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { + if len(values) != len(lvs) { + return false + } + for i, v := range values { + if v != lvs[i] { + return false + } + } + return true +} + +func (m *MetricVec) matchLabels(values []string, labels Labels) bool { + if len(labels) != len(values) { + return false + } + for i, k := range m.desc.variableLabels { + if values[i] != labels[k] { + return false + } + } + return true +} + +func (m *MetricVec) extractLabelValues(labels Labels) []string { + labelValues := make([]string, len(labels)) + for i, k := range m.desc.variableLabels { + labelValues[i] = labels[k] + } + return labelValues +} diff --git a/images/404-server/vendor/github.com/prometheus/client_model/AUTHORS.md b/images/404-server/vendor/github.com/prometheus/client_model/AUTHORS.md new file mode 100644 index 000000000..e8b3efa6a --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_model/AUTHORS.md @@ -0,0 +1,13 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Björn Rabenstein +* Matt T. Proud +* Tobias Schmidt diff --git a/images/404-server/vendor/github.com/prometheus/client_model/LICENSE b/images/404-server/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/images/404-server/vendor/github.com/prometheus/client_model/NOTICE b/images/404-server/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 000000000..20110e410 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/images/404-server/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/images/404-server/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 000000000..b065f8683 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. 
+ +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile 
`protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + 
return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/images/404-server/vendor/github.com/prometheus/common/LICENSE b/images/404-server/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/images/404-server/vendor/github.com/prometheus/common/NOTICE b/images/404-server/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 000000000..636a2c1a5 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/decode.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 000000000..a7a42d5ef --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+	Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+	// Timestamp is added to each value from the stream that has no explicit timestamp set.
+	Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+	ct := h.Get(hdrContentType)
+
+	mediatype, params, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return FmtUnknown
+	}
+
+	const textType = "text/plain"
+
+	switch mediatype {
+	case ProtoType:
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
+			return FmtUnknown
+		}
+		if e, ok := params["encoding"]; ok && e != "delimited" {
+			return FmtUnknown
+		}
+		return FmtProtoDelim
+
+	case textType:
+		if v, ok := params["version"]; ok && v != TextVersion {
+			return FmtUnknown
+		}
+		return FmtText
+	}
+
+	return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+	switch format {
+	case FmtProtoDelim:
+		return &protoDecoder{r: r}
+	}
+	return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+	r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+	_, err := pbutil.ReadDelimited(d.r, v)
+	if err != nil {
+		return err
+	}
+	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+		return fmt.Errorf("invalid metric name %q", v.GetName())
+	}
+	for _, m := range v.GetMetric() {
+		if m == nil {
+			continue
+		}
+		for _, l := range m.GetLabel() {
+			if l == nil {
+				continue
+			}
+			if !model.LabelValue(l.GetValue()).IsValid() {
+				return fmt.Errorf("invalid label value %q", l.GetValue())
+			}
+			if !model.LabelName(l.GetName()).IsValid() {
+				return fmt.Errorf("invalid label name %q", l.GetName())
+			}
+		}
+	}
+	return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+	r    io.Reader
+	p    TextParser
+	fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
+	if len(d.fams) == 0 {
+		// No cached metric families, read everything and parse metrics.
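+		// (Note: the parser below consumes the entire input in one pass; the
+		// resulting families are cached in d.fams and handed out one per
+		// Decode call. Once the cache is drained, the next call finds the
+		// reader exhausted and returns io.EOF.)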
+		fams, err := d.p.TextToMetricFamilies(d.r)
+		if err != nil {
+			return err
+		}
+		if len(fams) == 0 {
+			return io.EOF
+		}
+		d.fams = make([]*dto.MetricFamily, 0, len(fams))
+		for _, f := range fams {
+			d.fams = append(d.fams, f)
+		}
+	}
+
+	*v = *d.fams[0]
+	d.fams = d.fams[1:]
+
+	return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+	Dec  Decoder
+	Opts *DecodeOptions
+
+	f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
+		return err
+	}
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
+	for _, f := range fams {
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
+	}
+	return all, lastErr
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+	switch f.GetType() {
+	case dto.MetricType_COUNTER:
+		return extractCounter(o, f), nil
+	case dto.MetricType_GAUGE:
+		return extractGauge(o, f), nil
+	case dto.MetricType_SUMMARY:
+		return extractSummary(o, f), nil
+	case dto.MetricType_UNTYPED:
+		return extractUntyped(o, f), nil
+	case dto.MetricType_HISTOGRAM:
+		return extractHistogram(o, f), nil
+	}
+	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Counter == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Counter.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Gauge == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Gauge.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range
f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". + lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: 
timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/encode.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 000000000..11839ed65 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
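+// A minimal usage sketch (assuming w is an http.ResponseWriter, r the incoming
+// *http.Request, and mf a populated *dto.MetricFamily; these names are
+// illustrative, not part of this package):
+//
+//	format := Negotiate(r.Header)
+//	w.Header().Set("Content-Type", string(format))
+//	if err := NewEncoder(w, format).Encode(mf); err != nil {
+//		// handle the encoding error
+//	}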
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/expfmt.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 000000000..371ac7503 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/fuzz.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 000000000..dc2eedeef --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//   go-fuzz-build github.com/prometheus/common/expfmt
+//   go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := TextParser{}
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/text_create.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 000000000..f11321cd0
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+	var written int
+
+	// Fail-fast checks.
+	if len(in.Metric) == 0 {
+		return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+	}
+	name := in.GetName()
+	if name == "" {
+		return written, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err := fmt.Fprintf(
+			out, "# HELP %s %s\n",
+			name, escapeString(*in.Help, false),
+		)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	metricType := in.GetType()
+	n, err := fmt.Fprintf(
+		out, "# TYPE %s %s\n",
+		name, strings.ToLower(metricType.String()),
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	// Finally the samples, one line for each.
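+	// (Plain counters, gauges, and untyped metrics emit one line each;
+	// summaries expand into one line per quantile plus _sum and _count
+	// lines, histograms into one line per bucket plus _sum and _count.)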
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
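+// As an illustration (hypothetical names and values, not taken from this
+// package), a call for a counter "http_requests_total" with a label
+// method="post" and value 1027 emits a single text-format line:
+//
+//	http_requests_total{method="post"} 1027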
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +var ( + escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + if includeDoubleQuote { + return escapeWithDoubleQuote.Replace(v) + } + + return escape.Replace(v) +} diff --git a/images/404-server/vendor/github.com/prometheus/common/expfmt/text_parse.go b/images/404-server/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 000000000..ef9a15077 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,753 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. 
If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. 
+ return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. 
+	}
+	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+		return nil
+	}
+	switch p.currentMF.GetType() {
+	case dto.MetricType_COUNTER:
+		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+	case dto.MetricType_GAUGE:
+		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+	case dto.MetricType_UNTYPED:
+		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+	case dto.MetricType_SUMMARY:
+		// *sigh*
+		if p.currentMetric.Summary == nil {
+			p.currentMetric.Summary = &dto.Summary{}
+		}
+		switch {
+		case p.currentIsSummaryCount:
+			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsSummarySum:
+			p.currentMetric.Summary.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentQuantile):
+			p.currentMetric.Summary.Quantile = append(
+				p.currentMetric.Summary.Quantile,
+				&dto.Quantile{
+					Quantile: proto.Float64(p.currentQuantile),
+					Value:    proto.Float64(value),
+				},
+			)
+		}
+	case dto.MetricType_HISTOGRAM:
+		// *sigh*
+		if p.currentMetric.Histogram == nil {
+			p.currentMetric.Histogram = &dto.Histogram{}
+		}
+		switch {
+		case p.currentIsHistogramCount:
+			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsHistogramSum:
+			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentBucket):
+			p.currentMetric.Histogram.Bucket = append(
+				p.currentMetric.Histogram.Bucket,
+				&dto.Bucket{
+					UpperBound:      proto.Float64(p.currentBucket),
+					CumulativeCount: proto.Uint64(uint64(value)),
+				},
+			)
+		}
+	default:
+		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMetric.TimestampMs = proto.Int64(timestamp)
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+	p.currentToken.Reset()
+	escaped := false
+	for p.err == nil {
+		if recognizeEscapeSequence && escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '\n':
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
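+// (Per the validity helpers at the bottom of this file, a metric name starts
+// with a byte matching [a-zA-Z_:] and continues with bytes matching
+// [a-zA-Z0-9_:].)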
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
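+	// (For example, a hypothetical sample line named
+	// "rpc_duration_seconds_count" is folded into the summary family
+	// "rpc_duration_seconds" rather than starting a new family.)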
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/bitbucket.org/ww/goautoneg/README.txt b/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt similarity index 100% rename from vendor/bitbucket.org/ww/goautoneg/README.txt rename to images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt diff --git a/vendor/bitbucket.org/ww/goautoneg/autoneg.go b/images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go similarity index 100% rename from vendor/bitbucket.org/ww/goautoneg/autoneg.go rename to images/404-server/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go diff --git a/images/404-server/vendor/github.com/prometheus/common/model/alert.go b/images/404-server/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 000000000..35e739c7a --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true iff the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is consistent. +func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alerts is a list of alerts that can be sorted in chronological order. +type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[j].StartsAt.Before(as[i].StartsAt) { + return false + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + if as[j].EndsAt.Before(as[i].EndsAt) { + return false + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns AlertFiring iff at least one of the alerts is firing.
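+// For example, the Status of a list holding one firing and one resolved alert is AlertFiring.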
+func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/fingerprinting.go b/images/404-server/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 000000000..fc4de4106 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. 
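+// For example, the intersection of {1, 2} and {2, 3} is {2}.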
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/fnv.go b/images/404-server/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 000000000..038fc1c90 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializes a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/labels.go b/images/404-server/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 000000000..41051a01a --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. + ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target.
+ SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. This +// method, however, does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. It implements sort.Interface.
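+// For example, sort.Sort(LabelNames{"job", "instance"}) yields the order instance, job.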
+type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is valid UTF-8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/labelset.go b/images/404-server/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 000000000..6eda08a73 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs.
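+// For example, {job="api"} and {job="api", instance="a"} are not Equal; the key sets differ.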
+func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/metric.go b/images/404-server/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 000000000..f7250909b --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. +func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/model.go b/images/404-server/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 000000000..a7b969170 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/images/404-server/vendor/github.com/prometheus/common/model/signature.go b/images/404-server/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 000000000..8762b13c6 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// hash collisions. Therefore, collision detection should be applied.
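+// Each label pair is hashed independently and the results are XORed together, so the fingerprint is independent of map iteration order and no sorting is needed.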
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames in the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. +func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/silence.go b/images/404-server/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 000000000..7538e2997 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes how to match the value of a given label. +type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate checks whether all fields of the matcher have valid values.
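+// A regex matcher is validated by compiling its pattern; a literal matcher must be a non-empty, valid-UTF-8 value.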
+func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definition +// in the Prometheus eco-system. +type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate checks whether all fields of the silence have valid values. +func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/time.go b/images/404-server/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 000000000..548968aeb --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,249 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at most time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. + second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes an interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time.
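+// The wall-clock time is truncated to millisecond (minimumTick) resolution.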
+func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h.
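+// For example, "2d" parses to 48h, while compound strings such as "1h30m" are rejected.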
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/images/404-server/vendor/github.com/prometheus/common/model/value.go b/images/404-server/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 000000000..c9ed3ffd8 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. 
Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. + ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the values of v and o are equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality are defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality are defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler.
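+// The expected wire format mirrors MarshalJSON above: {"metric": {...}, "value": [<timestamp>, "<value>"]}.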
+func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached Metric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler.
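+// A scalar arrives as [<timestamp>, "<value>"]; the quoted value is parsed back into a float below.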
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/.travis.yml b/images/404-server/vendor/github.com/prometheus/procfs/.travis.yml new file mode 100644 index 000000000..a9e28bf5d --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/.travis.yml @@ -0,0 +1,5 @@ +sudo: false +language: go +go: + - 1.6.4 + - 1.7.4 diff --git a/images/404-server/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/images/404-server/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 000000000..40503edbf --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. 
+ +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/images/404-server/vendor/github.com/prometheus/procfs/LICENSE b/images/404-server/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/images/404-server/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/images/404-server/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 000000000..35993c41c --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1 @@ +* Tobias Schmidt diff --git a/images/404-server/vendor/github.com/prometheus/procfs/Makefile b/images/404-server/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 000000000..c264a49d1 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,6 @@ +ci: + ! gofmt -l *.go | read nothing + go vet + go test -v ./... + go get github.com/golang/lint/golint + golint *.go diff --git a/images/404-server/vendor/github.com/prometheus/procfs/NOTICE b/images/404-server/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 000000000..53c5e9aa1 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/images/404-server/vendor/github.com/prometheus/procfs/README.md b/images/404-server/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 000000000..209549471 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,11 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. 
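+
+A minimal usage sketch (assumes a mounted `/proc`; error handling elided):
+
+```go
+fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+stat, _ := fs.NewStat()
+fmt.Println(stat.BootTime)
+```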
+ +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/images/404-server/vendor/github.com/prometheus/procfs/buddyinfo.go b/images/404-server/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 000000000..680a9842a --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
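+//
+// A usage sketch (assumes a mounted proc filesystem; error handling elided):
+//
+//	fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//	infos, _ := fs.NewBuddyInfo()
+//	for _, bi := range infos {
+//		fmt.Println(bi.Node, bi.Zone, bi.Sizes)
+//	}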
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(string(line)) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/doc.go b/images/404-server/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 000000000..e2acd6d40 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/images/404-server/vendor/github.com/prometheus/procfs/fs.go b/images/404-server/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 000000000..17546756b --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,46 @@ +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. 
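+//
+// For example (a sketch; any readable directory can serve as a mount point):
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}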
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+	return path.Join(append([]string{string(fs)}, p...)...)
+}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+	f, err := os.Open(fs.Path("fs/xfs/stat"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return xfs.ParseStats(f)
+}
diff --git a/images/404-server/vendor/github.com/prometheus/procfs/ipvs.go b/images/404-server/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 000000000..696d114e7
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,246 @@
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
+	// The local firewall mark.
+	LocalMark string
+	// The transport protocol (TCP, UDP).
+	Proto string
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The remote (real) port.
+	RemotePort uint16
+	// The current number of active connections for this virtual/real address pair.
+	ActiveConn uint64
+	// The current number of inactive connections for this virtual/real address pair.
+	InactConn uint64
+	// The current weight of this virtual/real address pair.
+	Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+	file, err := os.Open(fs.Path("net/ip_vs_stats"))
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	defer file.Close()
+
+	return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
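+// The file is expected to carry a two-line header followed by a single line
+// of five hexadecimal counters, roughly (a representative sample):
+//
+//	   Total Incoming Outgoing         Incoming         Outgoing
+//	   Conns  Packets  Packets            Bytes            Bytes
+//	 16AA370 E33656E5        0     51D8C8883AB3                0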
+func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(string(scanner.Text())) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = 
hex.DecodeString(s[0:8])
+		if err != nil {
+			return nil, 0, err
+		}
+	case 46:
+		ip = net.ParseIP(s[1:40])
+		if ip == nil {
+			return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+		}
+	default:
+		return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+	}
+
+	portString := s[len(s)-4:]
+	if len(portString) != 4 {
+		return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+	}
+	port, err := strconv.ParseUint(portString, 16, 16)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return ip, uint16(port), nil
+}
diff --git a/images/404-server/vendor/github.com/prometheus/procfs/mdstat.go b/images/404-server/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 000000000..d7a248c0d
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,138 @@
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+	buildlineRE  = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+	// Name of the device.
+	Name string
+	// Activity state of the device.
+	ActivityState string
+	// Number of active disks.
+	DisksActive int64
+	// Total number of disks the device consists of.
+	DisksTotal int64
+	// Number of blocks the device holds.
+	BlocksTotal int64
+	// Number of blocks on the device that are in sync.
+	BlocksSynced int64
+}
+
+// ParseMDStat parses the mdstat file and returns a slice of MDStat structs
+// with the relevant info.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+	mdStatusFilePath := fs.Path("mdstat")
+	content, err := ioutil.ReadFile(mdStatusFilePath)
+	if err != nil {
+		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+	}
+
+	mdStates := []MDStat{}
+	lines := strings.Split(string(content), "\n")
+	for i, l := range lines {
+		if l == "" {
+			continue
+		}
+		if l[0] == ' ' {
+			continue
+		}
+		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+			continue
+		}
+
+		mainLine := strings.Split(l, " ")
+		if len(mainLine) < 3 {
+			return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+		}
+		mdName := mainLine[0]
+		activityState := mainLine[2]
+
+		if len(lines) <= i+3 {
+			return mdStates, fmt.Errorf(
+				"error parsing %s: too few lines for md device %s",
+				mdStatusFilePath,
+				mdName,
+			)
+		}
+
+		active, total, size, err := evalStatusline(lines[i+1])
+		if err != nil {
+			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+		}
+
+		// j is the line number of the syncing line.
+		j := i + 2
+		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+			j = i + 3
+		}
+
+		// If the device is syncing at the moment, get the number of currently
+		// synced bytes, otherwise that number equals the size of the device.
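+		// A sync line looks roughly like (hypothetical sample):
+		//   [===>.............]  recovery = 18.3% (1792/9766) finish=...
+		// buildlineRE pulls the first number out of the "(synced/total)" pair.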
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/mountstats.go b/images/404-server/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 000000000..6b2b0ba9d --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,556 @@ +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10Len = 10 + fieldTransport11Len = 13 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. 
+ Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. + SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. + PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { + // The name of the operation. + Operation string + // Number of requests performed for this operation. 
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueTime time.Duration
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseTime time.Duration
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTime time.Duration
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below are only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+	const (
+		device            = "device"
+		statVersionPrefix = "statvers="
+
+		nfs3Type = "nfs"
+		nfs4Type = "nfs4"
+	)
+
+	var mounts []*Mount
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Only look for device entries in this function
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 || ss[0] != device {
+			continue
+		}
+
+		m, err := parseMount(ss)
+		if err != nil {
+			return nil, err
+		}
+
+		// Does this mount also possess statistics information?
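+		// A device entry carrying statistics looks roughly like (hypothetical
+		// sample):
+		//   device 192.0.2.1:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1
+		// i.e. more than deviceEntryLen (8) fields, with the ninth field
+		// carrying the "statvers=" token.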
+ if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. +func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[2:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. 
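+//
+// The eight integers following the "bytes:" tag map to the struct fields in
+// order, e.g. (hypothetical sample):
+//
+//	bytes: 1207640230 0 0 0 1210214218 0 295483 0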
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. +func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. 
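+//
+// An "xprt:" line for TCP looks roughly like (hypothetical sample):
+//
+//	xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0
+//
+// with ten integers after the protocol name for statvers=1.0 and thirteen
+// for statvers=1.1.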
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + switch statVersion { + case statVersion10: + if len(ss) != fieldTransport10Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + if len(ss) != fieldTransport11Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11Len) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + return &NFSTransportStats{ + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/proc.go b/images/404-server/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 000000000..8717e1fe0 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,224 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. 
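+//
+// A usage sketch (assumes a mounted proc filesystem; error handling elided,
+// sort is the standard library package):
+//
+//	fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//	procs, _ := fs.AllProcs()
+//	sort.Sort(procs)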
+func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) 
+}
diff --git a/images/404-server/vendor/github.com/prometheus/procfs/proc_io.go b/images/404-server/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 000000000..b4e31d7ba
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// a detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+	pio := ProcIO{}
+
+	f, err := os.Open(p.path("io"))
+	if err != nil {
+		return pio, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return pio, err
+	}
+
+	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+		"read_bytes: %d\nwrite_bytes: %d\n" +
+		"cancelled_write_bytes: %d\n"
+
+	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+	if err != nil {
+		return pio, err
+	}
+
+	return pio, nil
+}
diff --git a/images/404-server/vendor/github.com/prometheus/procfs/proc_limits.go b/images/404-server/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 000000000..2df997ce1
--- /dev/null
+++ b/images/404-server/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+	// CPU time limit in seconds.
+	CPUTime int
+	// Maximum size of files that the process may create.
+	FileSize int
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
+	DataSize int
+	// Maximum size of the process stack in bytes.
+	StackSize int
+	// Maximum size of a core file.
+	CoreFileSize int
+	// Limit of the process's resident set in pages.
+	ResidentSet int
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
+	Processes int
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
+	OpenFiles int
+	// Maximum number of bytes of memory that may be locked into RAM.
+	LockedMemory int
+	// Maximum size of the process's virtual memory address space in bytes.
+	AddressSpace int
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
+	FileLocks int
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
+	PendingSignals int
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
+	MsqqueueSize int
+	// Limit of the nice priority set using setpriority(2) or nice(2).
+	NicePriority int
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
+ RealtimePriority int + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. +func (p Proc) NewLimits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return int(i), nil +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/proc_stat.go b/images/404-server/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 000000000..724e271b9 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,175 @@ +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. 
+// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. 
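+// The value is the resident page count (RSS) multiplied by the system page
+// size reported by os.Getpagesize().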
+func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/stat.go b/images/404-server/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 000000000..1ca217e8c --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,56 @@ +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime int64 +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// NewStat returns an information about current kernel/system statistics. +func (fs FS) NewStat() (Stat, error) { + f, err := os.Open(fs.Path("stat")) + if err != nil { + return Stat{}, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Text() + if !strings.HasPrefix(line, "btime") { + continue + } + fields := strings.Fields(line) + if len(fields) != 2 { + return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) + } + i, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) + } + return Stat{BootTime: i}, nil + } + if err := s.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/xfrm.go b/images/404-server/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 000000000..ffe9df50d --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. 
sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy error
+	XfrmOutPolError int
+	XfrmFwdHdrError int
+	XfrmOutStateInvalid int
+	XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+	file, err := os.Open(fs.Path("net/xfrm_stat"))
+	if err != nil {
+		return XfrmStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		x = XfrmStat{}
+		s = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return XfrmStat{}, fmt.Errorf(
+				"couldn't parse %s line %s", file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return XfrmStat{}, err
+		}
+
+		switch name {
+		case "XfrmInError":
+			x.XfrmInError = value
+		case "XfrmInBufferError":
+			x.XfrmInBufferError = value
+		case "XfrmInHdrError":
+			x.XfrmInHdrError = value
+		case "XfrmInNoStates":
+			x.XfrmInNoStates = value
+		case "XfrmInStateProtoError":
+			x.XfrmInStateProtoError = value
+		case "XfrmInStateModeError":
+			x.XfrmInStateModeError = value
+		case "XfrmInStateSeqError":
+			x.XfrmInStateSeqError = value
+		case "XfrmInStateExpired":
+			x.XfrmInStateExpired = value
+		case "XfrmInStateInvalid":
+			x.XfrmInStateInvalid = value
+		case "XfrmInTmplMismatch":
+			x.XfrmInTmplMismatch = value
+		case "XfrmInNoPols":
+			x.XfrmInNoPols = value
+		case "XfrmInPolBlock":
+			x.XfrmInPolBlock = value
+		case "XfrmInPolError":
+			x.XfrmInPolError = value
+		case "XfrmOutError":
+			x.XfrmOutError = value
+		case "XfrmInStateMismatch":
+			x.XfrmInStateMismatch = value
+		case "XfrmOutBundleGenError":
+			x.XfrmOutBundleGenError = value
+		case "XfrmOutBundleCheckError":
+			x.XfrmOutBundleCheckError = value
+		case "XfrmOutNoStates":
+			x.XfrmOutNoStates = value
+		case "XfrmOutStateProtoError":
+			x.XfrmOutStateProtoError = value
+		case "XfrmOutStateModeError":
+			x.XfrmOutStateModeError = value
+		case "XfrmOutStateSeqError":
+			x.XfrmOutStateSeqError = value
+		case "XfrmOutStateExpired":
+			x.XfrmOutStateExpired = value
+		case "XfrmOutPolBlock":
+			x.XfrmOutPolBlock = value
+		case "XfrmOutPolDead":
+			x.XfrmOutPolDead = value
+		case "XfrmOutPolError":
+			x.XfrmOutPolError = value
+		case "XfrmFwdHdrError":
x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/xfs/parse.go b/images/404-server/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 000000000..c8f6279f3 --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,359 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. + fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := parseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. 
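+		// Illustrative example, not output from a real host: a line such as
+		// "extent_alloc 1 2 3 4" yields label "extent_alloc" and fields
+		// ["1", "2", "3", "4"], which parseUint32s converts to
+		// []uint32{1, 2, 3, 4} before the switch below dispatches them to
+		// extentAllocationStats.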
+ us, err := parseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. +func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. 
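+// The seven values correspond to the "ig" row of /proc/fs/xfs/stat
+// (fieldIg in ParseStats above).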
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+	if l := len(us); l != 7 {
+		return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+	}
+
+	return InodeOperationStats{
+		Attempts:        us[0],
+		Found:           us[1],
+		Recycle:         us[2],
+		Missed:          us[3],
+		Duplicate:       us[4],
+		Reclaims:        us[5],
+		AttributeChange: us[6],
+	}, nil
+}
+
+// LogOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+	if l := len(us); l != 5 {
+		return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+	}
+
+	return LogOperationStats{
+		Writes:            us[0],
+		Blocks:            us[1],
+		NoInternalBuffers: us[2],
+		Force:             us[3],
+		ForceSleep:        us[4],
+	}, nil
+}
+
+// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+	if l := len(us); l != 2 {
+		return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+	}
+
+	return ReadWriteStats{
+		Read:  us[0],
+		Write: us[1],
+	}, nil
+}
+
+// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+	if l := len(us); l != 4 {
+		return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+	}
+
+	return AttributeOperationStats{
+		Get:    us[0],
+		Set:    us[1],
+		Remove: us[2],
+		List:   us[3],
+	}, nil
+}
+
+// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+	if l := len(us); l != 3 {
+		return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+	}
+
+	return InodeClusteringStats{
+		Iflush:     us[0],
+		Flush:      us[1],
+		FlushInode: us[2],
+	}, nil
+}
+
+// VnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+	// The attribute "Free" appears to not be available on older XFS
+	// stats versions. Therefore, 7 or 8 elements may appear in
+	// this slice.
+	l := len(us)
+	if l != 7 && l != 8 {
+		return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+	}
+
+	s := VnodeStats{
+		Active:   us[0],
+		Allocate: us[1],
+		Get:      us[2],
+		Hold:     us[3],
+		Release:  us[4],
+		Reclaim:  us[5],
+		Remove:   us[6],
+	}
+
+	// Skip adding free, unless it is present. The zero value will
+	// be used in place of an actual count.
+	if l == 7 {
+		return s, nil
+	}
+
+	s.Free = us[7]
+	return s, nil
+}
+
+// BufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+	if l := len(us); l != 9 {
+		return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+	}
+
+	return BufferStats{
+		Get:             us[0],
+		Create:          us[1],
+		GetLocked:       us[2],
+		GetLockedWaited: us[3],
+		BusyLocked:      us[4],
+		MissLocked:      us[5],
+		PageRetries:     us[6],
+		PageFound:       us[7],
+		GetRead:         us[8],
+	}, nil
+}
+
+// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
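+// The three values are the flush, write, and read byte counters of the
+// "xpc" row of /proc/fs/xfs/stat, in that order.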
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} + +// parseUint32s parses a slice of strings into a slice of uint32s. +func parseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// parseUint64s parses a slice of strings into a slice of uint64s. +func parseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} diff --git a/images/404-server/vendor/github.com/prometheus/procfs/xfs/xfs.go b/images/404-server/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 000000000..d86794b7c --- /dev/null +++ b/images/404-server/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. +type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. 
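+// The values are parsed from the "blk_map" row of /proc/fs/xfs/stat.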
+type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/images/echoheaders/Dockerfile b/images/echoheaders/Dockerfile index 9438d131e..fd4c641bb 100644 --- a/images/echoheaders/Dockerfile +++ b/images/echoheaders/Dockerfile @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM gcr.io/google_containers/nginx-slim:0.17 +FROM BASEIMAGE ADD nginx.conf /etc/nginx/nginx.conf +ADD template.lua /usr/local/share/lua/5.1/ ADD README.md README.md diff --git a/images/echoheaders/Makefile b/images/echoheaders/Makefile index abab0b383..4ad95cdb7 100644 --- a/images/echoheaders/Makefile +++ b/images/echoheaders/Makefile @@ -1,11 +1,64 @@ -all: push +# Copyright 2017 The Kubernetes Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-# TAG 0.0 shouldn't clobber any release builds
-TAG = 1.5
-PREFIX = gcr.io/google_containers/echoserver
+TAG = 1.8
+REGISTRY = gcr.io/google_containers
+ARCH ?= $(shell go env GOARCH)
+ALL_ARCH = amd64 arm ppc64le
 
-container:
-	docker build -t $(PREFIX):$(TAG) .
+QEMUVERSION=v2.7.0
 
-push: container
-	gcloud docker -- push $(PREFIX):$(TAG)
+IMGNAME = echoserver
+IMAGE = $(REGISTRY)/$(IMGNAME)
+MULTI_ARCH_IMG = $(IMAGE)-$(ARCH)
+
+# Set default base image dynamically for each arch
+BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.21
+
+TEMP_DIR := $(shell mktemp -d)
+
+all: all-container
+
+sub-container-%:
+	$(MAKE) ARCH=$* container
+
+sub-push-%:
+	$(MAKE) ARCH=$* push
+
+all-container: $(addprefix sub-container-,$(ALL_ARCH))
+
+all-push: $(addprefix sub-push-,$(ALL_ARCH))
+
+container: .container-$(ARCH)
+.container-$(ARCH):
+	cp ./* $(TEMP_DIR)
+	cd $(TEMP_DIR) && sed -i 's|BASEIMAGE|$(BASEIMAGE)|g' Dockerfile
+
+	docker build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR)
+
+ifeq ($(ARCH), amd64)
+	# This is to maintain backward compatibility
+	docker tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG)
+endif
+
+push: .push-$(ARCH)
+.push-$(ARCH): .container-$(ARCH)
+	gcloud docker -- push $(MULTI_ARCH_IMG):$(TAG)
+ifeq ($(ARCH), amd64)
+	gcloud docker -- push $(IMAGE):$(TAG)
+endif
+
+clean: $(addprefix sub-clean-,$(ALL_ARCH))
+sub-clean-%:
+	docker rmi -f $(IMAGE)-$*:$(TAG) || true
diff --git a/images/echoheaders/README.md b/images/echoheaders/README.md
index f18f7ab3a..d023094ef 100644
--- a/images/echoheaders/README.md
+++ b/images/echoheaders/README.md
@@ -2,7 +2,7 @@
 
 This is a simple server that responds with the http headers it received.
 
-Image versions >= 1.4 removes the redirect introduced in 1.3.
-Image versions >= 1.3 redirect requests on :80 with `X-Forwarded-Proto: http` to :443.
-Image versions > 1.0 run an nginx server, and implement the echoserver using lua in the nginx config.
+Image versions >= 1.4 removes the redirect introduced in 1.3.
+Image versions >= 1.3 redirect requests on :80 with `X-Forwarded-Proto: http` to :443.
+Image versions > 1.0 run an nginx server, and implement the echoserver using lua in the nginx config.
 Image versions <= 1.0 run a python http server instead of nginx, and don't redirect any requests.
diff --git a/images/echoheaders/echo-app.yaml b/images/echoheaders/echo-app.yaml
index 59c53b33e..4ffe68134 100644
--- a/images/echoheaders/echo-app.yaml
+++ b/images/echoheaders/echo-app.yaml
@@ -29,7 +29,7 @@ spec:
     spec:
       containers:
       - name: echoheaders
-        image: gcr.io/google_containers/echoserver:1.5
+        image: gcr.io/google_containers/echoserver:1.7
         ports:
         - containerPort: 8080
         env:
diff --git a/images/echoheaders/nginx.conf b/images/echoheaders/nginx.conf
index 83f8245f9..7543612e4 100644
--- a/images/echoheaders/nginx.conf
+++ b/images/echoheaders/nginx.conf
@@ -1,5 +1,5 @@
 events {
-  worker_connections 1024;
+    worker_connections 1024;
 }
 
 env HOSTNAME;
@@ -9,73 +9,75 @@ env POD_NAMESPACE;
 env POD_IP;
 
 http {
-  default_type 'text/plain';
-  # maximum allowed size of the client request body. By default this is 1m.
-  # Request with bigger bodies nginx will return error code 413.
-  # http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
-  client_max_body_size 10m;
+    default_type 'text/plain';
+    # maximum allowed size of the client request body. By default this is 1m.
+ # Request with bigger bodies nginx will return error code 413. + # http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + client_max_body_size 10m; - server { - # please check the benefits of reuseport https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1 - # basically instructs to create an individual listening socket for each worker process (using the SO_REUSEPORT - # socket option), allowing a kernel to distribute incoming connections between worker processes. - listen 8080 default_server reuseport; + init_by_lua_block { + local template = require("template") + -- template syntax documented here: + -- https://github.com/bungle/lua-resty-template/blob/master/README.md + tmpl = template.compile([[ - # Replace '_' with your hostname. - server_name _; - location / { - lua_need_request_body on; - content_by_lua_block { - ngx.header["Server"] = "echoserver" +Hostname: {{os.getenv("HOSTNAME") or "N/A"}} - ngx.say("") - ngx.say("") - ngx.say("Hostname: ", os.getenv("HOSTNAME") or "N/A") - ngx.say("") +Pod Information: +{% if os.getenv("POD_NAME") then %} + node name: {{os.getenv("NODE_NAME") or "N/A"}} + pod name: {{os.getenv("POD_NAME") or "N/A"}} + pod namespace: {{os.getenv("POD_NAMESPACE") or "N/A"}} + pod IP: {{os.getenv("POD_IP") or "N/A"}} +{% else %} + -no pod information available- +{% end %} - ngx.say("Pod Information:") - if os.getenv("POD_NAME") then - ngx.say("\tnode name:\t ", os.getenv("NODE_NAME") or "N/A") - ngx.say("\tpod name:\t ", os.getenv("POD_NAME") or "N/A") - ngx.say("\tpod namespace:\t ", os.getenv("POD_NAMESPACE") or "N/A") - ngx.say("\tpod IP: \t ", os.getenv("POD_IP") or "N/A") - else - ngx.say("\t-no pod information available-") - end +Server values: + server_version=nginx: {{ngx.var.nginx_version}} - lua: {{ngx.config.ngx_lua_version}} - ngx.say("") +Request Information: + client_address={{ngx.var.remote_addr}} + method={{ngx.req.get_method()}} + real path={{ngx.var.request_uri}} + query={{ngx.var.query_string or ""}} + request_version={{ngx.req.http_version()}} + request_uri={{ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri}} - ngx.say("Server values:") - ngx.say("\tserver_version=", "nginx: "..ngx.var.nginx_version.." - lua: "..ngx.config.ngx_lua_version) - ngx.say("") +Request Headers: +{% for i, key in ipairs(keys) do %} + {{key}}={{headers[key]}} +{% end %} - ngx.say("Request Information:") - ngx.say("\tclient_address=", ngx.var.remote_addr) - ngx.say("\tmethod=", ngx.req.get_method()) - ngx.say("\treal path=", ngx.var.request_uri) - ngx.say("\tquery=", ngx.var.query_string or "") - ngx.say("\trequest_version=", ngx.req.http_version()) - ngx.say("\trequest_uri=", ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri) - ngx.say("") +Request Body: +{{ngx.var.request_body or " -no body in request-"}} +]]) + } - ngx.say("Request Headers:") - local headers = ngx.req.get_headers() - local keys = {} - for key, val in pairs(headers) do - table.insert(keys, key) - end + server { + # please check the benefits of reuseport https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1 + # basically instructs to create an individual listening socket for each worker process (using the SO_REUSEPORT + # socket option), allowing a kernel to distribute incoming connections between worker processes. 
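+        # Note: requests are answered by rendering the template that was
+        # compiled once in the init_by_lua_block above (see the
+        # content_by_lua_block below).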
+ listen 8080 default_server reuseport; - table.sort(keys) - for i, key in ipairs(keys) do - ngx.say("\t", key, "=", headers[key]) - end - ngx.say("") + # Replace '_' with your hostname. + server_name _; - ngx.say("Request Body:") - ngx.say(ngx.var.request_body or "\t-no body in request-"); - ngx.say("") - } - } - } + location / { + lua_need_request_body on; + content_by_lua_block { + ngx.header["Server"] = "echoserver" + + local headers = ngx.req.get_headers() + local keys = {} + for key, val in pairs(headers) do + table.insert(keys, key) + end + table.sort(keys) + + ngx.say(tmpl({os=os, ngx=ngx, keys=keys, headers=headers})) + } + } + } } diff --git a/images/echoheaders/template.lua b/images/echoheaders/template.lua new file mode 100644 index 000000000..cc80308a7 --- /dev/null +++ b/images/echoheaders/template.lua @@ -0,0 +1,509 @@ +-- vendored from https://raw.githubusercontent.com/bungle/lua-resty-template/1f9a5c24fc7572dbf5be0b9f8168cc3984b03d24/lib/resty/template.lua +-- only modification: remove / from HTML_ENTITIES to not escape it, and fix the appropriate regex. +--[[ +Copyright (c) 2014 - 2017 Aapo Talvensaari +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+--]]
+
+local setmetatable = setmetatable
+local loadstring = loadstring
+local loadchunk
+local tostring = tostring
+local setfenv = setfenv
+local require = require
+local capture
+local concat = table.concat
+local assert = assert
+local prefix
+local write = io.write
+local pcall = pcall
+local phase
+local open = io.open
+local load = load
+local type = type
+local dump = string.dump
+local find = string.find
+local gsub = string.gsub
+local byte = string.byte
+local null
+local sub = string.sub
+local ngx = ngx
+local jit = jit
+local var
+
+local _VERSION = _VERSION
+local _ENV = _ENV
+local _G = _G
+
+local HTML_ENTITIES = {
+    ["&"] = "&amp;",
+    ["<"] = "&lt;",
+    [">"] = "&gt;",
+    ['"'] = "&quot;",
+    ["'"] = "&#39;",
+}
+
+local CODE_ENTITIES = {
+    ["{"] = "&#123;",
+    ["}"] = "&#125;",
+    ["&"] = "&amp;",
+    ["<"] = "&lt;",
+    [">"] = "&gt;",
+    ['"'] = "&quot;",
+    ["'"] = "&#39;",
+    ["/"] = "&#47;"
+}
+
+local VAR_PHASES
+
+local ok, newtab = pcall(require, "table.new")
+if not ok then newtab = function() return {} end end
+
+local caching = true
+local template = newtab(0, 12)
+
+template._VERSION = "1.9"
+template.cache = {}
+
+local function enabled(val)
+    if val == nil then return true end
+    return val == true or (val == "1" or val == "true" or val == "on")
+end
+
+local function trim(s)
+    return gsub(gsub(s, "^%s+", ""), "%s+$", "")
+end
+
+local function rpos(view, s)
+    while s > 0 do
+        local c = sub(view, s, s)
+        if c == " " or c == "\t" or c == "\0" or c == "\x0B" then
+            s = s - 1
+        else
+            break
+        end
+    end
+    return s
+end
+
+local function escaped(view, s)
+    if s > 1 and sub(view, s - 1, s - 1) == "\\" then
+        if s > 2 and sub(view, s - 2, s - 2) == "\\" then
+            return false, 1
+        else
+            return true, 1
+        end
+    end
+    return false, 0
+end
+
+local function readfile(path)
+    local file = open(path, "rb")
+    if not file then return nil end
+    local content = file:read "*a"
+    file:close()
+    return content
+end
+
+local function loadlua(path)
+    return readfile(path) or path
+end
+
+local function loadngx(path)
+    local vars = VAR_PHASES[phase()]
+    local file, location = path, vars and var.template_location
+    if sub(file, 1, 1) == "/" then file = sub(file, 2) end
+    if location and location ~= "" then
+        if sub(location, -1) == "/" then location = sub(location, 1, -2) end
+        local res = capture(concat{ location, '/', file})
+        if res.status == 200 then return res.body end
+    end
+    local root = vars and (var.template_root or var.document_root) or prefix
+    if sub(root, -1) == "/" then root = sub(root, 1, -2) end
+    return readfile(concat{ root, "/", file }) or path
+end
+
+do
+    if ngx then
+        VAR_PHASES = {
+            set = true,
+            rewrite = true,
+            access = true,
+            content = true,
+            header_filter = true,
+            body_filter = true,
+            log = true
+        }
+        template.print = ngx.print or write
+        template.load = loadngx
+        prefix, var, capture, null, phase = ngx.config.prefix(), ngx.var, ngx.location.capture, ngx.null, ngx.get_phase
+        if VAR_PHASES[phase()] then
+            caching = enabled(var.template_cache)
+        end
+    else
+        template.print = write
+        template.load = loadlua
+    end
+    if _VERSION == "Lua 5.1" then
+        local context = { __index = function(t, k)
+            return t.context[k] or t.template[k] or _G[k]
+        end }
+        if jit then
+            loadchunk = function(view)
+                return assert(load(view, nil, nil, setmetatable({ template = template }, context)))
+            end
+        else
+            loadchunk = function(view)
+                local func = assert(loadstring(view))
+                setfenv(func, setmetatable({ template = template }, context))
+                return func
+            end
+        end
+    else
+        local context = { __index = function(t, k)
+            return t.context[k] or
t.template[k] or _ENV[k] + end } + loadchunk = function(view) + return assert(load(view, nil, nil, setmetatable({ template = template }, context))) + end + end +end + +function template.caching(enable) + if enable ~= nil then caching = enable == true end + return caching +end + +function template.output(s) + if s == nil or s == null then return "" end + if type(s) == "function" then return template.output(s()) end + return tostring(s) +end + +function template.escape(s, c) + if type(s) == "string" then + if c then return gsub(s, "[}{\">/<'&]", CODE_ENTITIES) end + return gsub(s, "[\"><'&]", HTML_ENTITIES) + end + return template.output(s) +end + +function template.new(view, layout) + assert(view, "view was not provided for template.new(view, layout).") + local render, compile = template.render, template.compile + if layout then + if type(layout) == "table" then + return setmetatable({ render = function(self, context) + local context = context or self + context.blocks = context.blocks or {} + context.view = compile(view)(context) + layout.blocks = context.blocks or {} + layout.view = context.view or "" + return layout:render() + end }, { __tostring = function(self) + local context = self + context.blocks = context.blocks or {} + context.view = compile(view)(context) + layout.blocks = context.blocks or {} + layout.view = context.view + return tostring(layout) + end }) + else + return setmetatable({ render = function(self, context) + local context = context or self + context.blocks = context.blocks or {} + context.view = compile(view)(context) + return render(layout, context) + end }, { __tostring = function(self) + local context = self + context.blocks = context.blocks or {} + context.view = compile(view)(context) + return compile(layout)(context) + end }) + end + end + return setmetatable({ render = function(self, context) + return render(view, context or self) + end }, { __tostring = function(self) + return compile(view)(self) + end }) +end + +function template.precompile(view, path, strip) + local chunk = dump(template.compile(view), strip ~= false) + if path then + local file = open(path, "wb") + file:write(chunk) + file:close() + end + return chunk +end + +function template.compile(view, key, plain) + assert(view, "view was not provided for template.compile(view, key, plain).") + if key == "no-cache" then + return loadchunk(template.parse(view, plain)), false + end + key = key or view + local cache = template.cache + if cache[key] then return cache[key], true end + local func = loadchunk(template.parse(view, plain)) + if caching then cache[key] = func end + return func, false +end + +function template.parse(view, plain) + assert(view, "view was not provided for template.parse(view, plain).") + if not plain then + view = template.load(view) + if byte(view, 1, 1) == 27 then return view end + end + local j = 2 + local c = {[[ +context=... 
or {} +local function include(v, c) return template.compile(v)(c or context) end +local ___,blocks,layout={},blocks or {} +]] } + local i, s = 1, find(view, "{", 1, true) + while s do + local t, p = sub(view, s + 1, s + 1), s + 2 + if t == "{" then + local e = find(view, "}}", p, true) + if e then + local z, w = escaped(view, s) + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + if z then + i = s + else + c[j] = "___[#___+1]=template.escape(" + c[j+1] = trim(sub(view, p, e - 1)) + c[j+2] = ")\n" + j=j+3 + s, i = e + 1, e + 2 + end + end + elseif t == "*" then + local e = find(view, "*}", p, true) + if e then + local z, w = escaped(view, s) + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + if z then + i = s + else + c[j] = "___[#___+1]=template.output(" + c[j+1] = trim(sub(view, p, e - 1)) + c[j+2] = ")\n" + j=j+3 + s, i = e + 1, e + 2 + end + end + elseif t == "%" then + local e = find(view, "%}", p, true) + if e then + local z, w = escaped(view, s) + if z then + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + i = s + else + local n = e + 2 + if sub(view, n, n) == "\n" then + n = n + 1 + end + local r = rpos(view, s - 1) + if i <= r then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, r) + c[j+2] = "]=]\n" + j=j+3 + end + c[j] = trim(sub(view, p, e - 1)) + c[j+1] = "\n" + j=j+2 + s, i = n - 1, n + end + end + elseif t == "(" then + local e = find(view, ")}", p, true) + if e then + local z, w = escaped(view, s) + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + if z then + i = s + else + local f = sub(view, p, e - 1) + local x = find(f, ",", 2, true) + if x then + c[j] = "___[#___+1]=include([=[" + c[j+1] = trim(sub(f, 1, x - 1)) + c[j+2] = "]=]," + c[j+3] = trim(sub(f, x + 1)) + c[j+4] = ")\n" + j=j+5 + else + c[j] = "___[#___+1]=include([=[" + c[j+1] = trim(f) + c[j+2] = "]=])\n" + j=j+3 + end + s, i = e + 1, e + 2 + end + end + elseif t == "[" then + local e = find(view, "]}", p, true) + if e then + local z, w = escaped(view, s) + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + if z then + i = s + else + c[j] = "___[#___+1]=include(" + c[j+1] = trim(sub(view, p, e - 1)) + c[j+2] = ")\n" + j=j+3 + s, i = e + 1, e + 2 + end + end + elseif t == "-" then + local e = find(view, "-}", p, true) + if e then + local x, y = find(view, sub(view, s, e + 1), e + 2, true) + if x then + local z, w = escaped(view, s) + if z then + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + i = s + else + y = y + 1 + x = x - 1 + if sub(view, y, y) == "\n" then + y = y + 1 + end + local b = trim(sub(view, p, e - 1)) + if b == "verbatim" or b == "raw" then + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + c[j] = "___[#___+1]=[=[" + c[j+1] = sub(view, e + 2, x) + c[j+2] = "]=]\n" + j=j+3 + else + if sub(view, x, x) == "\n" then + x = x - 1 + end + local r = rpos(view, s - 1) + if i <= r then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, r) + c[j+2] = "]=]\n" + j=j+3 + end + c[j] = 'blocks["' + c[j+1] = b + c[j+2] = '"]=include[=[' + c[j+3] = sub(view, e + 2, x) + c[j+4] = "]=]\n" + j=j+5 + end + s, i = y - 1, y + end + end + end + 
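+        -- {# ... #} marks a template comment; the branch below skips the
+        -- commented span without emitting anything into the output.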
elseif t == "#" then + local e = find(view, "#}", p, true) + if e then + local z, w = escaped(view, s) + if i < s - w then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = sub(view, i, s - 1 - w) + c[j+2] = "]=]\n" + j=j+3 + end + if z then + i = s + else + e = e + 2 + if sub(view, e, e) == "\n" then + e = e + 1 + end + s, i = e - 1, e + end + end + end + s = find(view, "{", s + 1, true) + end + s = sub(view, i) + if s and s ~= "" then + c[j] = "___[#___+1]=[=[\n" + c[j+1] = s + c[j+2] = "]=]\n" + j=j+3 + end + c[j] = "return layout and include(layout,setmetatable({view=table.concat(___),blocks=blocks},{__index=context})) or table.concat(___)" + return concat(c) +end + +function template.render(view, context, key, plain) + assert(view, "view was not provided for template.render(view, context, key, plain).") + return template.print(template.compile(view, key, plain)(context)) +end + +return template \ No newline at end of file diff --git a/images/nginx-slim/Makefile b/images/nginx-slim/Makefile index 6f66c1a4a..946d0cd99 100644 --- a/images/nginx-slim/Makefile +++ b/images/nginx-slim/Makefile @@ -13,32 +13,32 @@ # limitations under the License. # 0.0.0 shouldn't clobber any released builds -TAG = 0.18 +TAG = 0.24 REGISTRY = gcr.io/google_containers ARCH ?= $(shell go env GOARCH) -ALL_ARCH = amd64 arm ppc64le +ALL_ARCH = amd64 arm arm64 ppc64le -QEMUVERSION=v2.7.0 +QEMUVERSION=v2.9.1 IMGNAME = nginx-slim IMAGE = $(REGISTRY)/$(IMGNAME) MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) # Set default base image dynamically for each arch -BASEIMAGE?=gcr.io/google_containers/ubuntu-slim-$(ARCH):0.11 +BASEIMAGE?=gcr.io/google_containers/ubuntu-slim-$(ARCH):0.14 ifeq ($(ARCH),arm) QEMUARCH=arm endif -#ifeq ($(ARCH),arm64) -# QEMUARCH=aarch64 -#endif +ifeq ($(ARCH),arm64) + QEMUARCH=aarch64 +endif ifeq ($(ARCH),ppc64le) QEMUARCH=ppc64le endif -#ifeq ($(ARCH),s390x) -# QEMUARCH=s390x -#endif +ifeq ($(ARCH),s390x) + QEMUARCH=s390x +endif TEMP_DIR := $(shell mktemp -d) diff --git a/images/nginx-slim/README.md b/images/nginx-slim/README.md index 8d93e3fa4..060fc275c 100644 --- a/images/nginx-slim/README.md +++ b/images/nginx-slim/README.md @@ -1,10 +1,9 @@ -nginx 1.11.x base image using [ubuntu-slim](https://github.com/kubernetes/contrib/tree/master/images/ubuntu-slim) +nginx 1.13.x base image using [ubuntu-slim](https://github.com/kubernetes/ingress/tree/master/images/ubuntu-slim) nginx [engine x] is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP proxy server. This custom nginx image contains: -- [lua](https://github.com/openresty/lua-nginx-module) support - [stream](http://nginx.org/en/docs/stream/ngx_stream_core_module.html) tcp support for upstreams - nginx stats [nginx-module-vts](https://github.com/vozlt/nginx-module-vts) - [Dynamic TLS record sizing](https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/) @@ -15,7 +14,7 @@ This image does provides a default configuration file with no backend servers. 
*Using docker* ``` -$ docker run -v /some/nginx.con:/etc/nginx/nginx.conf:ro gcr.io/google_containers/nginx-slim:0.12 +$ docker run -v /some/nginx.con:/etc/nginx/nginx.conf:ro gcr.io/google_containers/nginx-slim:0.22 ``` *Creating a replication controller* diff --git a/images/nginx-slim/build.sh b/images/nginx-slim/build.sh index 916f0814e..5cfc779a5 100755 --- a/images/nginx-slim/build.sh +++ b/images/nginx-slim/build.sh @@ -17,15 +17,11 @@ set -e -export NGINX_VERSION=1.13.1 +export NGINX_VERSION=1.13.5 export NDK_VERSION=0.3.0 -export VTS_VERSION=0.1.14 +export VTS_VERSION=0.1.15 export SETMISC_VERSION=0.31 -export LUA_VERSION=0.10.8 export STICKY_SESSIONS_VERSION=08a395c66e42 -export LUA_CJSON_VERSION=2.1.0.4 -export LUA_RESTY_HTTP_VERSION=0.07 -export LUA_UPSTREAM_VERSION=0.06 export MORE_HEADERS_VERSION=0.32 export NGINX_DIGEST_AUTH=7955af9c77598c697ac292811914ce1e2b3b824c export NGINX_SUBSTITUTIONS=bc58cb11844bc42735bbaef7085ea86ace46d05b @@ -50,9 +46,7 @@ mkdir "$BUILD_PATH" cd "$BUILD_PATH" if [[ ${ARCH} == "ppc64le" ]]; then - apt-get update && apt-get install --no-install-recommends -y software-properties-common && \ - add-apt-repository -y ppa:ibmpackages/luajit - apt-get update && apt-get install --no-install-recommends -y lua5.1 lua5.1-dev + apt-get update && apt-get install --no-install-recommends -y software-properties-common fi # install required packages to build @@ -70,14 +64,11 @@ apt-get update && apt-get install --no-install-recommends -y \ zlib1g-dev \ libaio1 \ libaio-dev \ - luajit \ openssl \ - libluajit-5.1 \ - libluajit-5.1-dev \ linux-headers-generic || exit 1 # download, verify and extract the source files -get_src a5856c72a6609a4dc68c88a7f3c33b79e6693343b62952e021e043fe347b6776 \ +get_src 0e75b94429b3f745377aeba3aff97da77bf2b03fcb9ff15b3bad9b038db29f2e \ "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" get_src 88e05a99a8a7419066f5ae75966fb1efc409bad4522d14986da074554ae61619 \ @@ -86,42 +77,25 @@ get_src 88e05a99a8a7419066f5ae75966fb1efc409bad4522d14986da074554ae61619 \ get_src 97946a68937b50ab8637e1a90a13198fe376d801dc3e7447052e43c28e9ee7de \ "https://github.com/openresty/set-misc-nginx-module/archive/v$SETMISC_VERSION.tar.gz" -get_src e3b0018959ac899b73d3843e07351023f02be0ff421214426e3fe32193138963 \ +get_src 5112a054b1b1edb4c0042a9a840ef45f22abb3c05c68174e28ebf483164fb7e1 \ "https://github.com/vozlt/nginx-module-vts/archive/v$VTS_VERSION.tar.gz" -get_src d67449c71051b3cc2d6dd60df0ae0d21fca08aa19c9b30c5b95ee21ff38ef8dd \ - "https://github.com/openresty/lua-nginx-module/archive/v$LUA_VERSION.tar.gz" - -get_src 5417991b6db4d46383da2d18f2fd46b93fafcebfe87ba87f7cfeac4c9bcb0224 \ - "https://github.com/openresty/lua-cjson/archive/$LUA_CJSON_VERSION.tar.gz" - -get_src 1c6aa06c9955397c94e9c3e0c0fba4e2704e85bee77b4512fb54ae7c25d58d86 \ - "https://github.com/pintsized/lua-resty-http/archive/v$LUA_RESTY_HTTP_VERSION.tar.gz" - get_src c6d9dab8ea1fc997031007e2e8f47cced01417e203cd88d53a9fe9f6ae138720 \ "https://github.com/openresty/headers-more-nginx-module/archive/v$MORE_HEADERS_VERSION.tar.gz" -get_src 55475fe4f9e4b5220761269ccf0069ebb1ded61d7e7888f9c785c651cff3d141 \ - "https://github.com/openresty/lua-upstream-nginx-module/archive/v$LUA_UPSTREAM_VERSION.tar.gz" - get_src 53e440737ed1aff1f09fae150219a45f16add0c8d6e84546cb7d80f73ebffd90 \ "https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng/get/$STICKY_SESSIONS_VERSION.tar.gz" get_src 9b1d0075df787338bb607f14925886249bda60b6b3156713923d5d59e99a708b \ 
"https://github.com/atomx/nginx-http-auth-digest/archive/$NGINX_DIGEST_AUTH.tar.gz" -get_src 8eabbcd5950fdcc718bb0ef9165206c2ed60f67cd9da553d7bc3e6fe4e338461 \ +get_src 618551948ab14cac51d6e4ad00452312c7b09938f59ebff4f93875013be31f2d \ "https://github.com/yaoweibin/ngx_http_substitutions_filter_module/archive/$NGINX_SUBSTITUTIONS.tar.gz" #https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/ curl -sSL -o nginx__dynamic_tls_records.patch https://raw.githubusercontent.com/cloudflare/sslconfig/master/patches/nginx__1.11.5_dynamic_tls_records.patch -# https://github.com/openresty/lua-nginx-module/issues/1016 -curl -sSL -o patch-src-ngx_http_lua_headers.c.diff https://raw.githubusercontent.com/macports/macports-ports/master/www/nginx/files/patch-src-ngx_http_lua_headers.c.diff -cd "$BUILD_PATH/lua-nginx-module-$LUA_VERSION" -patch -p1 < $BUILD_PATH/patch-src-ngx_http_lua_headers.c.diff - # build nginx cd "$BUILD_PATH/nginx-$NGINX_VERSION" @@ -177,32 +151,13 @@ fi --add-module="$BUILD_PATH/ngx_devel_kit-$NDK_VERSION" \ --add-module="$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION" \ --add-module="$BUILD_PATH/nginx-module-vts-$VTS_VERSION" \ - --add-module="$BUILD_PATH/lua-nginx-module-$LUA_VERSION" \ --add-module="$BUILD_PATH/headers-more-nginx-module-$MORE_HEADERS_VERSION" \ --add-module="$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION" \ --add-module="$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH" \ --add-module="$BUILD_PATH/ngx_http_substitutions_filter_module-$NGINX_SUBSTITUTIONS" \ - --add-module="$BUILD_PATH/lua-upstream-nginx-module-$LUA_UPSTREAM_VERSION" || exit 1 \ && make || exit 1 \ && make install || exit 1 -echo "Installing CJSON module" -cd "$BUILD_PATH/lua-cjson-$LUA_CJSON_VERSION" - -if [[ ${ARCH} == "ppc64le" ]];then - LUA_DIR=/usr/include/luajit-2.1 -else - LUA_DIR=/usr/include/luajit-2.0 -fi -make LUA_INCLUDE_DIR=${LUA_DIR} && make install - -echo "Installing lua-resty-http module" -# copy lua module -cd "$BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION" -sed -i 's/resty.http_headers/http_headers/' $BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION/lib/resty/http.lua -cp $BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION/lib/resty/http.lua /usr/local/lib/lua/5.1 -cp $BUILD_PATH/lua-resty-http-$LUA_RESTY_HTTP_VERSION/lib/resty/http_headers.lua /usr/local/lib/lua/5.1 - echo "Cleaning..." cd / @@ -214,16 +169,10 @@ apt-mark unmarkauto \ libpcre3 \ zlib1g \ libaio1 \ - luajit \ - libluajit-5.1-2 \ xz-utils \ geoip-bin \ openssl -if [[ ${ARCH} == "ppc64le" ]]; then - apt-mark unmarkauto liblua5.1-0 -fi - apt-get remove -y --purge \ build-essential \ gcc-5 \ @@ -233,7 +182,6 @@ apt-get remove -y --purge \ libssl-dev \ zlib1g-dev \ libaio-dev \ - libluajit-5.1-dev \ linux-libc-dev \ perl-modules-5.22 \ linux-headers-generic diff --git a/images/nginx-slim/rc.yaml b/images/nginx-slim/rc.yaml index 960a190a0..07ba5f56d 100644 --- a/images/nginx-slim/rc.yaml +++ b/images/nginx-slim/rc.yaml @@ -29,6 +29,6 @@ spec: spec: containers: - name: nginxslim - image: gcr.io/google_containers/nginx-slim:0.16 + image: gcr.io/google_containers/nginx-slim:0.22 ports: - containerPort: 80 diff --git a/images/ubuntu-slim/Dockerfile.build b/images/ubuntu-slim/Dockerfile.build index b3adacb5a..baa26b565 100644 --- a/images/ubuntu-slim/Dockerfile.build +++ b/images/ubuntu-slim/Dockerfile.build @@ -28,7 +28,9 @@ RUN echo "Yes, do as I say!" 
| apt-get purge \ ncurses-base \ ncurses-bin \ locales \ - tzdata + tzdata \ + systemd \ + libsystemd0 # cleanup RUN apt-get autoremove -y && \ diff --git a/images/ubuntu-slim/Makefile b/images/ubuntu-slim/Makefile index 376c4d17e..11d5d503a 100755 --- a/images/ubuntu-slim/Makefile +++ b/images/ubuntu-slim/Makefile @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -TAG ?= 0.11 +TAG ?= 0.14 REGISTRY = gcr.io/google_containers ARCH ?= $(shell go env GOARCH) ALL_ARCH = amd64 arm arm64 ppc64le -QEMUVERSION=v2.7.0 +QEMUVERSION=v2.9.1 IMGNAME = ubuntu-slim IMAGE = $(REGISTRY)/$(IMGNAME) @@ -44,10 +44,10 @@ ifeq ($(ARCH),ppc64le) QEMUARCH=ppc64le endif -#ifeq ($(ARCH),s390x) -# BASEIMAGE?=s390x/ubuntu:16.04 -# QEMUARCH=s390x -#endif +ifeq ($(ARCH),s390x) + BASEIMAGE?=s390x/ubuntu:16.04 + QEMUARCH=s390x +endif TEMP_DIR := $(shell mktemp -d) diff --git a/tests/manifests/configuration-a.json b/tests/manifests/configuration-a.json new file mode 100644 index 000000000..c9ec72a2d --- /dev/null +++ b/tests/manifests/configuration-a.json @@ -0,0 +1,689 @@ +{ + "backends": [{ + "name": "upstream-default-backend", + "service": { + "metadata": { + "name": "default-http-backend", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/default-http-backend", + "uid": "907dc7db-5178-11e7-b3db-080027494b5d", + "resourceVersion": "3249707", + "creationTimestamp": "2017-06-15T03:13:12Z", + "labels": { + "k8s-app": "default-http-backend" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 80 + }], + "selector": { + "k8s-app": "default-http-backend" + }, + "clusterIP": "10.0.0.131", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.3", + "port": "80", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "kube-system", + "name": "default-http-backend-3371703669-4dfnt", + "uid": "72e9c21f-793e-11e7-ac58-080027494b5d", + "resourceVersion": "3700969" + } + }], + "sessionAffinityConfig": { + "name": "", + "cookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }, { + "name": "default-http-svc-80", + "service": { + "metadata": { + "name": "http-svc", + "namespace": "default", + "selfLink": "/api/v1/namespaces/default/services/http-svc", + "uid": "88b3c8a8-517e-11e7-b3db-080027494b5d", + "resourceVersion": "4242", + "creationTimestamp": "2017-06-15T03:55:55Z", + "labels": { + "app": "http-svc" + } + }, + "spec": { + "ports": [{ + "name": "http", + "protocol": "TCP", + "port": 80, + "targetPort": 8080, + "nodePort": 30301 + }], + "selector": { + "app": "http-svc" + }, + "clusterIP": "10.0.0.224", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.2", + "port": "8080", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "default", + "name": "http-svc-w088k", + "uid": "283a4361-7c7c-11e7-b124-080027494b5d", + "resourceVersion": "3700943" + } + }, { + "address": "172.17.0.5", + "port": "8080", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "default", 
+ "name": "http-svc-tp758", + "uid": "43a82d6e-7303-11e7-ac58-080027494b5d", + "resourceVersion": "3700959" + } + }], + "sessionAffinityConfig": { + "name": "", + "cookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }, { + "name": "kube-system-kubernetes-dashboard-80", + "service": { + "metadata": { + "name": "kubernetes-dashboard", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/kubernetes-dashboard", + "uid": "b957713f-5176-11e7-b3db-080027494b5d", + "resourceVersion": "82", + "creationTimestamp": "2017-06-15T03:00:01Z", + "labels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "app": "kubernetes-dashboard", + "kubernetes.io/minikube-addons": "dashboard", + "kubernetes.io/minikube-addons-endpoint": "dashboard" + }, + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"app\":\"kubernetes-dashboard\",\"kubernetes.io/minikube-addons\":\"dashboard\",\"kubernetes.io/minikube-addons-endpoint\":\"dashboard\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"nodePort\":30000,\"port\":80,\"targetPort\":9090}],\"selector\":{\"app\":\"kubernetes-dashboard\"},\"type\":\"NodePort\"}}\n" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 9090, + "nodePort": 30000 + }], + "selector": { + "app": "kubernetes-dashboard" + }, + "clusterIP": "10.0.0.120", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.6", + "port": "9090", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kubernetes-dashboard-m3qc0", + "uid": "b9511631-5176-11e7-b3db-080027494b5d", + "resourceVersion": "3700964" + } + }], + "sessionAffinityConfig": { + "name": "", + "cookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }], + "servers": [{ + "hostname": "_", + "sslPassthrough": false, + "sslCertificate": "/ingress-controller/ssl/default-fake-certificate.pem", + "sslExpireTime": "0001-01-01T00:00:00Z", + "sslPemChecksum": "84d4ecb651dae44d625531bf77b6265d660b60b2", + "locations": [{ + "path": "/", + "isDefBackend": true, + "backend": "upstream-default-backend", + "service": { + "metadata": { + "name": "default-http-backend", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/default-http-backend", + "uid": "907dc7db-5178-11e7-b3db-080027494b5d", + "resourceVersion": "3249707", + "creationTimestamp": "2017-06-15T03:13:12Z", + "labels": { + "k8s-app": "default-http-backend" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 80 + }], + "selector": { + "k8s-app": "default-http-backend" + }, + "clusterIP": "10.0.0.131", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 
0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": false, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + }, { + "hostname": "dev.mycompany.com", + "sslPassthrough": false, + "sslCertificate": "/ingress-controller/ssl/default-mycompany.pem", + "sslExpireTime": "2027-06-20T20:28:25Z", + "sslPemChecksum": "b9282485e120e4fad8c25d15dc1b7984fcde99ba", + "locations": [{ + "path": "/bar", + "isDefBackend": false, + "backend": "default-http-svc-80", + "service": { + "metadata": { + "name": "http-svc", + "namespace": "default", + "selfLink": "/api/v1/namespaces/default/services/http-svc", + "uid": "88b3c8a8-517e-11e7-b3db-080027494b5d", + "resourceVersion": "4242", + "creationTimestamp": "2017-06-15T03:55:55Z", + "labels": { + "app": "http-svc" + } + }, + "spec": { + "ports": [{ + "name": "http", + "protocol": "TCP", + "port": 80, + "targetPort": 8080, + "nodePort": 30301 + }], + "selector": { + "app": "http-svc" + }, + "clusterIP": "10.0.0.224", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": true, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }, { + "path": "/", + "isDefBackend": true, + "backend": "upstream-default-backend", + "service": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + 
"addBaseUrl": false, + "sslRedirect": false, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + }, { + "hostname": "domain.tld", + "sslPassthrough": false, + "sslCertificate": "", + "sslExpireTime": "0001-01-01T00:00:00Z", + "sslPemChecksum": "", + "locations": [{ + "path": "/dashboard", + "isDefBackend": false, + "backend": "kube-system-kubernetes-dashboard-80", + "service": { + "metadata": { + "name": "kubernetes-dashboard", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/kubernetes-dashboard", + "uid": "b957713f-5176-11e7-b3db-080027494b5d", + "resourceVersion": "82", + "creationTimestamp": "2017-06-15T03:00:01Z", + "labels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "app": "kubernetes-dashboard", + "kubernetes.io/minikube-addons": "dashboard", + "kubernetes.io/minikube-addons-endpoint": "dashboard" + }, + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"app\":\"kubernetes-dashboard\",\"kubernetes.io/minikube-addons\":\"dashboard\",\"kubernetes.io/minikube-addons-endpoint\":\"dashboard\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"nodePort\":30000,\"port\":80,\"targetPort\":9090}],\"selector\":{\"app\":\"kubernetes-dashboard\"},\"type\":\"NodePort\"}}\n" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 9090, + "nodePort": 30000 + }], + "selector": { + "app": "kubernetes-dashboard" + }, + "clusterIP": "10.0.0.120", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "/", + "addBaseUrl": false, + "sslRedirect": true, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }, { + "path": "/", + "isDefBackend": true, + "backend": "upstream-default-backend", + "service": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + 
"basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": false, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + }], + "TCPBackends": [], + "UDPBackends": [] +} \ No newline at end of file diff --git a/tests/manifests/configuration-b.json b/tests/manifests/configuration-b.json new file mode 100644 index 000000000..6cd73cb46 --- /dev/null +++ b/tests/manifests/configuration-b.json @@ -0,0 +1,689 @@ +{ + "backends": [{ + "name": "upstream-default-backend", + "service": { + "metadata": { + "name": "default-http-backend", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/default-http-backend", + "uid": "907dc7db-5178-11e7-b3db-080027494b5d", + "resourceVersion": "3249707", + "creationTimestamp": "2017-06-15T03:13:12Z", + "labels": { + "k8s-app": "default-http-backend" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 80 + }], + "selector": { + "k8s-app": "default-http-backend" + }, + "clusterIP": "10.0.0.131", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.3", + "port": "80", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "kube-system", + "name": "default-http-backend-3371703669-4dfnt", + "uid": "72e9c21f-793e-11e7-ac58-080027494b5d", + "resourceVersion": "3700969" + } + }], + "sessionAffinityConfig": { + "name": "", + "cookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }, { + "name": "default-http-svc-80", + "service": { + "metadata": { + "name": "http-svc", + "namespace": "default", + "selfLink": "/api/v1/namespaces/default/services/http-svc", + "uid": "88b3c8a8-517e-11e7-b3db-080027494b5d", + "resourceVersion": "4242", + "creationTimestamp": "2017-06-15T03:55:55Z", + "labels": { + "app": "http-svc" + } + }, + "spec": { + "ports": [{ + "name": "http", + "protocol": "TCP", + "port": 80, + "targetPort": 8080, + "nodePort": 30301 + }], + "selector": { + "app": "http-svc" + }, + "clusterIP": "10.0.0.224", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.2", + "port": "8080", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": 
"default", + "name": "http-svc-w088k", + "uid": "283a4361-7c7c-11e7-b124-080027494b5d", + "resourceVersion": "3700943" + } + }, { + "address": "172.17.0.5", + "port": "8080", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "default", + "name": "http-svc-tp758", + "uid": "43a82d6e-7303-11e7-ac58-080027494b5d", + "resourceVersion": "3700959" + } + }], + "sessionAffinityConfig": { + "name": "", + "cookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }, { + "name": "kube-system-kubernetes-dashboard-80", + "service": { + "metadata": { + "name": "kubernetes-dashboard", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/kubernetes-dashboard", + "uid": "b957713f-5176-11e7-b3db-080027494b5d", + "resourceVersion": "82", + "creationTimestamp": "2017-06-15T03:00:01Z", + "labels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "app": "kubernetes-dashboard", + "kubernetes.io/minikube-addons": "dashboard", + "kubernetes.io/minikube-addons-endpoint": "dashboard" + }, + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"app\":\"kubernetes-dashboard\",\"kubernetes.io/minikube-addons\":\"dashboard\",\"kubernetes.io/minikube-addons-endpoint\":\"dashboard\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"nodePort\":30000,\"port\":80,\"targetPort\":9090}],\"selector\":{\"app\":\"kubernetes-dashboard\"},\"type\":\"NodePort\"}}\n" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 9090, + "nodePort": 30000 + }], + "selector": { + "app": "kubernetes-dashboard" + }, + "clusterIP": "10.0.0.120", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.6", + "port": "9090", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kubernetes-dashboard-m3qc0", + "uid": "b9511631-5176-11e7-b3db-080027494b5d", + "resourceVersion": "3700964" + } + }], + "sessionAffinityConfig": { + "name": "", + "cookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }], + "servers": [{ + "hostname": "domain.tld", + "sslPassthrough": false, + "sslCertificate": "", + "sslExpireTime": "0001-01-01T00:00:00Z", + "sslPemChecksum": "", + "locations": [{ + "path": "/dashboard", + "isDefBackend": false, + "backend": "kube-system-kubernetes-dashboard-80", + "service": { + "metadata": { + "name": "kubernetes-dashboard", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/kubernetes-dashboard", + "uid": "b957713f-5176-11e7-b3db-080027494b5d", + "resourceVersion": "82", + "creationTimestamp": "2017-06-15T03:00:01Z", + "labels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "app": "kubernetes-dashboard", + "kubernetes.io/minikube-addons": "dashboard", + "kubernetes.io/minikube-addons-endpoint": "dashboard" + }, + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"app\":\"kubernetes-dashboard\",\"kubernetes.io/minikube-addons\":\"dashboard\",\"kubernetes.io/minikube-addons-endpoint\":\"dashboard\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"nodePort\":30000,\"port\":80,\"targetPort\":9090}],\"selector\":{\"app\":\"kubernetes-dashboard\"},\"type\":\"NodePort\"}}\n" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 9090, + "nodePort": 30000 + }], + "selector": { + "app": "kubernetes-dashboard" + }, + "clusterIP": "10.0.0.120", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "/", + "addBaseUrl": false, + "sslRedirect": true, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }, { + "path": "/", + "isDefBackend": true, + "backend": "upstream-default-backend", + "service": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": false, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + },{ + "hostname": "_", + "sslPassthrough": false, + "sslCertificate": "/ingress-controller/ssl/default-fake-certificate.pem", + "sslExpireTime": "0001-01-01T00:00:00Z", + "sslPemChecksum": "84d4ecb651dae44d625531bf77b6265d660b60b2", + "locations": [{ + "path": "/", + "isDefBackend": true, + "backend": 
"upstream-default-backend", + "service": { + "metadata": { + "name": "default-http-backend", + "namespace": "kube-system", + "selfLink": "/api/v1/namespaces/kube-system/services/default-http-backend", + "uid": "907dc7db-5178-11e7-b3db-080027494b5d", + "resourceVersion": "3249707", + "creationTimestamp": "2017-06-15T03:13:12Z", + "labels": { + "k8s-app": "default-http-backend" + } + }, + "spec": { + "ports": [{ + "protocol": "TCP", + "port": 80, + "targetPort": 80 + }], + "selector": { + "k8s-app": "default-http-backend" + }, + "clusterIP": "10.0.0.131", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": false, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + }, { + "hostname": "dev.mycompany.com", + "sslPassthrough": false, + "sslCertificate": "/ingress-controller/ssl/default-mycompany.pem", + "sslExpireTime": "2027-06-20T20:28:25Z", + "sslPemChecksum": "b9282485e120e4fad8c25d15dc1b7984fcde99ba", + "locations": [{ + "path": "/bar", + "isDefBackend": false, + "backend": "default-http-svc-80", + "service": { + "metadata": { + "name": "http-svc", + "namespace": "default", + "selfLink": "/api/v1/namespaces/default/services/http-svc", + "uid": "88b3c8a8-517e-11e7-b3db-080027494b5d", + "resourceVersion": "4242", + "creationTimestamp": "2017-06-15T03:55:55Z", + "labels": { + "app": "http-svc" + } + }, + "spec": { + "ports": [{ + "name": "http", + "protocol": "TCP", + "port": 80, + "targetPort": 8080, + "nodePort": 30301 + }], + "selector": { + "app": "http-svc" + }, + "clusterIP": "10.0.0.224", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 80, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": true, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + 
"cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }, { + "path": "/", + "isDefBackend": true, + "backend": "upstream-default-backend", + "service": { + "metadata": { + "creationTimestamp": null + }, + "spec": {}, + "status": { + "loadBalancer": {} + } + }, + "port": 0, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false, + "fileSha": "" + }, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rpm": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": false, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1m", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off", + "nextUpstream": "error timeout invalid_header http_502 http_503 http_504" + }, + "certificateAuth": { + "authSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + }], + "TCPBackends": [], + "UDPBackends": [] +} \ No newline at end of file diff --git a/tests/manifests/configuration-c.json b/tests/manifests/configuration-c.json new file mode 100644 index 000000000..ee2078015 --- /dev/null +++ b/tests/manifests/configuration-c.json @@ -0,0 +1,261 @@ +{ + "backends": [{ + "name": "upstream-default-backend", + "service": null, + "port": 0, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.8", + "port": "8080", + "maxFails": 0, + "failTimeout": 0, + "target": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kubernetes-dashboard-m3qc0", + "uid": "b9511631-5176-11e7-b3db-080027494b5d", + "resourceVersion": "3700964" + } + }], + "SessionAffinity": { + "name": "", + "CookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }, { + "name": "deis-deis-controller-8000", + "service": { + "metadata": { + "name": "deis-controller", + "namespace": "deis", + "selfLink": "/api/v1/namespaces/deis/services/deis-controller", + "uid": "1cba01a8-50b0-11e7-a384-0800270f5693", + "resourceVersion": "532", + "creationTimestamp": "2017-06-14T03:18:18Z", + "labels": { + "heritage": "deis" + } + }, + "spec": { + "ports": [{ + "name": "http", + "protocol": "TCP", + "port": 8000, + "targetPort": 8000, + "nodePort": 30171 + }], + "selector": { + "app": "deis-controller" + }, + "clusterIP": "10.0.0.198", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 8000, + "secure": false, + "secureCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "sslPassthrough": false, + "endpoints": [{ + "address": "172.17.0.7", + "port": "8000", + "maxFails": 0, + "failTimeout": 0 + }], + "SessionAffinity": { + "name": "", + "CookieSessionAffinity": { + "name": "", + "hash": "" + } + } + }], + "servers": [{ + 
"hostname": "_", + "sslPassthrough": false, + "sslCertificate": "/ingress-controller/ssl/default-fake-certificate.pem", + "sslExpireTime": "0001-01-01T00:00:00Z", + "sslPemChecksum": "123b44425920a2e4825ae779fba0e6e07fbac03d", + "locations": [{ + "path": "/", + "isDefBackend": true, + "backend": "upstream-default-backend", + "service": null, + "port": 0, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false + }, + "Denied": null, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": false, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1g", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off" + }, + "certificateAuth": { + "AuthSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + }, { + "hostname": "deis.minikube", + "sslPassthrough": false, + "sslCertificate": "", + "sslExpireTime": "0001-01-01T00:00:00Z", + "sslPemChecksum": "", + "locations": [{ + "path": "/", + "isDefBackend": false, + "backend": "deis-deis-controller-8000", + "service": { + "metadata": { + "name": "deis-controller", + "namespace": "deis", + "selfLink": "/api/v1/namespaces/deis/services/deis-controller", + "uid": "1cba01a8-50b0-11e7-a384-0800270f5693", + "resourceVersion": "532", + "creationTimestamp": "2017-06-14T03:18:18Z", + "labels": { + "heritage": "deis" + } + }, + "spec": { + "ports": [{ + "name": "http", + "protocol": "TCP", + "port": 8000, + "targetPort": 8000, + "nodePort": 30171 + }], + "selector": { + "app": "deis-controller" + }, + "clusterIP": "10.0.0.198", + "type": "NodePort", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + "port": 8000, + "basicDigestAuth": { + "type": "", + "realm": "", + "file": "", + "secured": false + }, + "Denied": null, + "externalAuth": { + "url": "", + "host": "", + "signinUrl": "", + "method": "", + "sendBody": false, + "responseHeaders": null + }, + "rateLimit": { + "connections": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + }, + "rps": { + "name": "", + "limit": 0, + "burst": 0, + "sharedSize": 0 + } + }, + "redirect": { + "target": "", + "addBaseUrl": false, + "sslRedirect": true, + "forceSSLRedirect": false, + "appRoot": "" + }, + "whitelist": { + "cidr": null + }, + "proxy": { + "bodySize": "1g", + "conectTimeout": 5, + "sendTimeout": 60, + "readTimeout": 60, + "bufferSize": "4k", + "cookieDomain": "off", + "cookiePath": "off" + }, + "certificateAuth": { + "AuthSSLCert": { + "secret": "", + "caFilename": "", + "pemSha": "" + }, + "validationDepth": 0 + }, + "use-port-in-redirects": false, + "configuration-snippet": "" + }] + }] +} diff --git a/vendor/bitbucket.org/ww/goautoneg/Makefile b/vendor/bitbucket.org/ww/goautoneg/Makefile deleted file mode 100644 index e33ee1730..000000000 --- a/vendor/bitbucket.org/ww/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - 
-docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index 2a7cfd2bf..c83641619 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2012-2013 Dave Collins <dave@davec.name> +ISC License + +Copyright (c) 2012-2016 Dave Collins <dave@davec.name> Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 565bf5899..8a4a6589a 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins <dave@davec.name> +// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 457e41235..1fe3cf3d5 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins <dave@davec.name> +// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 14f02dc15..1be8ce945 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins <dave@davec.name> + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go index ee1ab07b3..2e3d22f31 100644 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins <dave@davec.name> + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -64,9 +64,18 @@ type ConfigState struct { // inside these interface methods. As a result, this option relies on // access to the unsafe package, so it will not have any effect when // running in environments without access to the unsafe package such as - // Google App Engine or with the "disableunsafe" build tag specified. + // Google App Engine or with the "safe" build tag specified. DisablePointerMethods bool + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once // a custom error or Stringer interface is invoked. The default, false, // means it will print the results of invoking the custom error or Stringer diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go index 5be0c4060..aacaac6f1 100644 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins <dave@davec.name> + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -91,6 +91,15 @@ The following configuration options are available: which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index a0ff95e27..df1d582a7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins <dave@davec.name> + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) { d.w.Write(closeParenBytes) // Display pointer information.
- if len(pointerChain) > 0 { + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { d.w.Write(openParenBytes) for i, addr := range pointerChain { if i > 0 { @@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) { case reflect.Map, reflect.String: valueLen = v.Len() } - if valueLen != 0 || valueCap != 0 { + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { d.w.Write(openParenBytes) if valueLen != 0 { d.w.Write(lenEqualsBytes) printInt(d.w, int64(valueLen), 10) } - if valueCap != 0 { + if !d.cs.DisableCapacities && valueCap != 0 { if valueLen != 0 { d.w.Write(spaceBytes) } diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index ecf3b80e2..c49875bac 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins <dave@davec.name> + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go index d8233f542..32c0e3388 100644 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins <dave@davec.name> + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/go-openapi/analysis/.drone.yml b/vendor/github.com/go-openapi/analysis/.drone.yml deleted file mode 100644 index 6c2683311..000000000 --- a/vendor/github.com/go-openapi/analysis/.drone.yml +++ /dev/null @@ -1,36 +0,0 @@ -clone: - path: github.com/go-openapi/analysis - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u gopkg.in/yaml.v2 - - go get -u github.com/go-openapi/swag - - go get -u github.com/go-openapi/jsonpointer - - go get -u github.com/go-openapi/spec - - go get -u github.com/go-openapi/loads/fmts - - go test -race ./... - - go test -v -cover -coverprofile=coverage.out -covermode=count ./...
- -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/analysis/.pullapprove.yml b/vendor/github.com/go-openapi/analysis/.pullapprove.yml deleted file mode 100644 index 4ab790edb..000000000 --- a/vendor/github.com/go-openapi/analysis/.pullapprove.yml +++ /dev/null @@ -1,12 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. 
Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index b6e526c00..000000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index d388db3a7..000000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - allRefs map[string]spec.Ref - referenced struct { - schemas map[string]SchemaRef - responses map[string]*spec.Response - parameters map[string]*spec.Parameter - } -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. -func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - consumes: make(map[string]struct{}, 150), - produces: make(map[string]struct{}, 150), - authSchemes: make(map[string]struct{}, 150), - operations: make(map[string]map[string]*spec.Operation, 150), - allSchemas: make(map[string]SchemaRef, 150), - allOfs: make(map[string]SchemaRef, 150), - references: referenceAnalysis{ - schemas: make(map[string]spec.Ref, 150), - responses: make(map[string]spec.Ref, 150), - parameters: make(map[string]spec.Ref, 150), - items: make(map[string]spec.Ref, 150), - allRefs: make(map[string]spec.Ref, 150), - }, - } - a.references.referenced.schemas = make(map[string]SchemaRef, 150) - a.references.referenced.responses = make(map[string]*spec.Response, 150) - a.references.referenced.parameters = make(map[string]*spec.Parameter, 150) - a.initialize() - return a -} - -// Spec takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec -type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref) - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", *parameter.Schema, refPref) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for _, 
v := range response.Headers { - if v.Items != nil { - s.analyzeItems("items", v.Items, refPref) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", *response.Schema, refPref) - } - } - - for name, schema := range s.spec.Definitions { - s.analyzeSchema(name, schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: resolve refs here? - op := pi - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, &param) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref) - } - if param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, &param) - } - s.analyzeItems("items", param.Items, refPref) - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } - if op.Responses != nil { - if op.Responses.Default != nil { - refPref := slashpath.Join(prefix, "responses", "default") - if op.Responses.Default.Ref.String() != "" { - s.references.addResponseRef(refPref, op.Responses.Default) - } - for _, v := range op.Responses.Default.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if op.Responses.Default.Schema != nil { - s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) - } - } - for k, res := range op.Responses.StatusCodeResponses { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) - } - for _, v := range res.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if res.Schema != nil { - s.analyzeSchema("schema", *res.Schema, refPref) - } - } - } -} - -func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: &schema, - Ref:
spec.MustCreateRef("#" + refURI), - } - s.allSchemas["#"+refURI] = schRef - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - for k, v := range schema.Definitions { - s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) - } - for k, v := range schema.Properties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) - } - for k, v := range schema.PatternProperties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) - } - for i, v := range schema.AllOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)} - } - for i, v := range schema.AnyOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - for i, v := range schema.OneOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - if schema.Not != nil { - s.analyzeSchema("not", *schema.Not, refURI) - } - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) - } - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) - } - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", *schema.Items.Schema, refURI) - } - for i, sch := range schema.Items.Schemas { - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - unique := make(map[string]SecurityRequirement) - for _, scheme := range schemes { - for k, v := range scheme { - if _, ok := unique[k]; !ok { - unique[k] = SecurityRequirement{Name: k, Scopes: v} - } - } - } - - var result []SecurityRequirement - for _, v := range unique { - result = append(result, v) - } - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - result := make(map[string]spec.SecurityScheme) - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) 
ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) string { - return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - return swag.ToGoName(param.Name) -} - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) { - for _, param := range parameters { - pr := param - if pr.Ref.String() != "" { - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - panic(err) - } - pr = obj.(spec.Parameter) - } - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag) - s.paramsAsMap(op.Parameters, bag) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - return res - } - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) - } - } - return nil -} - -// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. 
-func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res) - } - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - return op, fn - } - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return s.structMapKeys(s.produces) -} - -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - 
return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - return -} - -// AllItemsReferences returns the references for all the items -func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - return -} - -// AllReferences returns all the references found in the document -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - return -} diff --git a/vendor/github.com/go-openapi/loads/.drone.yml b/vendor/github.com/go-openapi/loads/.drone.yml deleted file mode 100644 index 982291035..000000000 --- a/vendor/github.com/go-openapi/loads/.drone.yml +++ /dev/null @@ -1,39 +0,0 @@ -clone: - path: github.com/go-openapi/loads - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - environment: - GOCOVMODE: "count" - commands: - - go get -u github.com/axw/gocov/gocov - - go get -u gopkg.in/matm/v1/gocov-html - - go get -u github.com/cee-dub/go-junit-report - - go get -u github.com/stretchr/testify/assert - - go get -u gopkg.in/yaml.v2 - - go get -u github.com/go-openapi/swag - - go get -u github.com/go-openapi/analysis - - go get -u github.com/go-openapi/spec - - ./hack/build-drone.sh - -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore deleted file mode 100644 index e4f15f17b..000000000 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -secrets.yml -coverage.out -profile.cov -profile.out diff --git a/vendor/github.com/go-openapi/loads/.pullapprove.yml b/vendor/github.com/go-openapi/loads/.pullapprove.yml deleted file mode 100644 index 5ec183e22..000000000 --- a/vendor/github.com/go-openapi/loads/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal 
appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index 9d5c89997..000000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index ff1ee1c9f..000000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package loads - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} - -// AddLoader for a document -func AddLoader(predicate DocMatcher, load DocLoader) { - prev := loaders - loaders = &loader{ - Match: predicate, - Fn: load, - Next: prev, - } - -} - -type loader struct { - Fn DocLoader - Match DocMatcher - Next *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(json.RawMessage(data), "") -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage -} - -// Spec loads a new spec document -func Spec(path string) (*Document, error) { - specURL, err := url.Parse(path) - if err != nil { - return nil, err - } - for l := loaders.Next; l != nil; l = l.Next { - if loaders.Match(specURL.Path) { - b, err2 := loaders.Fn(path) - if err2 != nil { - return nil, err2 - } - return Analyzed(b, "") - } - } - b, err := loaders.Fn(path) - if err != nil { - return nil, err - } - return Analyzed(b, "") -} - -var swag20Schema = spec.MustLoadSwagger20Schema() - -// Analyzed creates a new analyzed spec document -func Analyzed(data json.RawMessage, version string) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - swspec := new(spec.Swagger) - if err := json.Unmarshal(data, swspec); err != nil { - return nil, err - } - - origsqspec := new(spec.Swagger) - if err := json.Unmarshal(data, origsqspec); err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: swag20Schema, - spec: swspec, - raw: data, - origSpec: origsqspec, - } - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded() (*Document, error) { - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - if err := spec.ExpandSpec(swspec); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - schema: swag20Schema, - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - 
return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -func (d *Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - return dd -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..89e07ae19 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. 
+func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if the corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if the type of the +// contents of the Any message does not match the type of the pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..f2c6906b9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,155 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message.
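A minimal sketch of how the ptypes Any helpers added above fit together, assuming the vendored import paths resolve as written and using the Duration message (also added in this change) as the payload; this is illustrative only and not part of the vendored sources:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a registered proto.Message (here a Duration) into an Any.
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 1, Nanos: 212000000})
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack when the concrete type is known up front.
	d := &durpb.Duration{}
	if ptypes.Is(a, d) {
		if err := ptypes.UnmarshalAny(a, d); err != nil {
			panic(err)
		}
	}

	// Unpack when the type is only known at runtime: DynamicAny allocates
	// the right message type from the proto registry.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		panic(err)
	}
	fmt.Printf("unmarshaled message: %v\n", dyn.Message)
}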
+// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, + 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, + 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 000000000..81dcf46cc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,140 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..c0d595da7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..65cb0f8eb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290 years). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..569748346 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto +// DO NOT EDIT! + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, + 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, + 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, + 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, + 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 000000000..96c1796d6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,98 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh new file mode 100755 index 000000000..2a5b4e8bd --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -0,0 +1,66 @@ +#!/bin/bash -e +# +# This script fetches and rebuilds the "well-known types" protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed.
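A quick sketch of the conversion helpers in ptypes/duration.go above, assuming the vendored import path resolves as written; illustrative only, not part of the vendored tree:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> durpb.Duration: 1.212s becomes seconds=1, nanos=212000000.
	p := ptypes.DurationProto(1212 * time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos)

	// durpb.Duration -> time.Duration, with the range and sign checks
	// from validateDuration applied first.
	d, err := ptypes.Duration(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1.212s
}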
+ +PKG=github.com/golang/protobuf/ptypes +UPSTREAM=https://github.com/google/protobuf +UPSTREAM_SUBDIR=src/google/protobuf +PROTO_FILES=' + any.proto + duration.proto + empty.proto + struct.proto + timestamp.proto + wrappers.proto +' + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir +# Pass 1: build mapping from upstream filename to our filename. +declare -A filename_map +for f in $(cd $PKG && find * -name '*.proto'); do + echo -n 1>&2 "looking for latest version of $f... " + up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/) + echo 1>&2 $up + if [ $(echo $up | wc -w) != "1" ]; then + die "not exactly one match" + fi + filename_map[$up]=$f +done +# Pass 2: copy files +for up in "${!filename_map[@]}"; do + f=${filename_map[$up]} + shortname=$(basename $f | sed 's,\.proto$,,') + cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f +done + +# Run protoc once per package. +for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done +echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..1b3657622 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 000000000..ffcc51594 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +// DO NOT EDIT!
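And a matching sketch for the Timestamp helpers in ptypes/timestamp.go above, under the same assumptions (illustrative only):

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> tspb.Timestamp; TimestampProto rejects times outside
	// the range [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2017, 6, 1, 12, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}

	// tspb.Timestamp -> time.Time (always in UTC), plus the RFC 3339
	// rendering produced by TimestampString.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t, ptypes.TimestampString(ts))
}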
+ +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, + 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, + 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, + 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, + 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 000000000..7992a8588 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,111 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive.
+ int32 nanos = 2; +} diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 000000000..6b0b1270f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 000000000..dc431e1ec --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,6944 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "strings" +) + +func Version() string { + return "openapi_v2" +} + +func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewSchema(m, compiler.NewContext("schema", context)) + if matching_error == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := 
fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", 
compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := 
fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := 
compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + 
errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions 
responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := v14.([]interface{}) + if ok { + for _, item := range a { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func 
NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := 
compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewFormDataParameterSubSchema(in 
interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 
:= compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, 
compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") 
+ if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, 
compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = v18.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := 
compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + 
if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = 
NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := 
compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"$ref", "description"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewLicense(in interface{}, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := 
[]string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, 
message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) 
{ + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, 
compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + allowedPatterns := []string{} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") 
+ if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matching_error == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matching_error == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matching_error == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matching_error == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := 
[]string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { 
+ pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + 
handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, 
message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + pair.Value = v.(string) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, 
err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matching_error == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matching_error == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewParameter(m, compiler.NewContext("parameter", context)) + if matching_error == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched 
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) {
+  errors := make([]error, 0)
+  x := &ParametersItem{}
+  matched := false
+  // Parameter parameter = 1;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matching_error := NewParameter(m, compiler.NewContext("parameter", context))
+      if matching_error == nil {
+        x.Oneof = &ParametersItem_Parameter{Parameter: t}
+        matched = true
+      } else {
+        errors = append(errors, matching_error)
+      }
+    }
+  }
+  // JsonReference json_reference = 2;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matching_error := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+      if matching_error == nil {
+        x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
+        matched = true
+      } else {
+        errors = append(errors, matching_error)
+      }
+    }
+  }
+  if matched {
+    // since the oneof matched one of its possibilities, discard any matching errors
+    errors = make([]error, 0)
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
_, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := 
fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, 
message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, 
err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []string{"^x-", "^/"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^/", k) { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer 
boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + 
message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) 
+ x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value 
for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + 
x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := 
item.Value + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewResponse(m, compiler.NewContext("response", context)) + if matching_error == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matching_error == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []string{"^([0-9]{3})$|^(default)$", "^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^([0-9]{3})$|^(default)$", k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func 
NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.MultipleOf = v6 + case float32: + x.MultipleOf = float64(v6) + case uint64: + x.MultipleOf = float64(v6) + case uint32: + x.MultipleOf = float64(v6) + case int64: + x.MultipleOf = float64(v6) + case int32: + x.MultipleOf = float64(v6) + case int: + x.MultipleOf = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + switch v7 := v7.(type) { + case float64: + x.Maximum = v7 + case float32: + x.Maximum = float64(v7) + case uint64: + x.Maximum = float64(v7) + case uint32: + x.Maximum = float64(v7) + case int64: + x.Maximum = float64(v7) + case int32: + x.Maximum = float64(v7) + case int: + x.Maximum = float64(v7) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 8; + 
v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = v8.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + switch v9 := v9.(type) { + case float64: + x.Minimum = v9 + case float32: + x.Minimum = float64(v9) + case uint64: + x.Minimum = float64(v9) + case uint32: + x.Minimum = float64(v9) + case int64: + x.Minimum = float64(v9) + case int32: + x.Minimum = float64(v9) + case int: + x.Minimum = float64(v9) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = v10.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := v12.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = v13.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = v16.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for 
minProperties: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := v19.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := v24.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = v26.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = v27.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + if err != nil { + 
errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewSchema(m, compiler.NewContext("schema", context)) + if matching_error == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matching_error == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matching_error == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = 
true + } else { + errors = append(errors, matching_error) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matching_error == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matching_error == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matching_error == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matching_error == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matching_error := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matching_error == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matching_error) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, 
compiler.NewErrorGroupOrNil(errors) +} + +func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + a, ok := in.([]interface{}) + if !ok { + message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Value = make([]string, 0) + for _, s := range a { + x.Value = append(x.Value, s.(string)) + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +}
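NewTag ends with the vendor-extension scan used by nearly every constructor in this file: any key matching ^x- is kept as a NamedAny pair holding both the parsed value and its re-serialized text. A dependency-free sketch of that collection step (strings.HasPrefix and encoding/json stand in here for compiler.PatternMatches and yaml.Marshal):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// namedAny mirrors the generated NamedAny pair: the extension name plus
// the value re-serialized as text (the real code keeps YAML; JSON keeps
// this sketch dependency-free).
type namedAny struct {
	Name string
	Blob string
}

// collectExtensions keeps only "x-"-prefixed keys, the way the generated
// constructors populate VendorExtension. The generated code matches the
// anchored pattern "^x-", so HasPrefix is equivalent here.
func collectExtensions(m map[string]interface{}) []namedAny {
	out := []namedAny{}
	for k, v := range m {
		if !strings.HasPrefix(k, "x-") {
			continue
		}
		b, err := json.Marshal(v)
		if err != nil {
			continue // the generated code records the error instead
		}
		out = append(out, namedAny{Name: k, Blob: string(b)})
	}
	return out
}

func main() {
	tag := map[string]interface{}{
		"name":   "pets",
		"x-logo": map[string]interface{}{"url": "https://example.com/logo.png"},
	}
	fmt.Println(collectExtensions(tag)) // [{x-logo {"url":"https://example.com/logo.png"}}]
}
```

+ +func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { + errors := make([]error, 0) + x := &TypeItem{} + switch in := in.(type) { + case string: + x.Value = make([]string, 0) + x.Value = append(x.Value, in) + case []interface{}: + x.Value = make([]string, 0) + for _, v := range in { + value, ok := v.(string) + if ok { + x.Value = 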
append(x.Value, value) + } else { + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) + errors = append(errors, compiler.NewError(context, message)) + } + } + default: + message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { + errors := make([]error, 0) + x := &VendorExtension{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []string{"^x-"} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %+v 
(%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := item.Key.(string) + if ok { + v := item.Value + if compiler.PatternMatches("^x-", k) { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Any) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Contact) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Default) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Definitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Document) ResolveReferences(root string) (interface{}, error) { + 
errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Examples) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return 
nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Header) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Headers) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Info) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *License) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedAny) ResolveReferences(root string) 
(interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedString) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + 
} + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Operation) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Parameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + 
return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *PathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Paths) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range 
m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Properties) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Response) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := 
p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Responses) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Schema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +}
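The XRef branches in JsonReference, PathItem, and Schema above all resolve a $ref the same way: load the referenced document fragment, rebuild the message from it, copy it over the receiver with *m = *replacement, and recurse in case the target is itself a reference. A toy sketch of that in-place replacement, with an in-memory table standing in for compiler.ReadInfoForRef; note that, as in the generated methods, a cyclic reference chain would recurse without bound:

```go
package main

import (
	"errors"
	"fmt"
)

// node is a hypothetical stand-in for a message that may carry a $ref
// (the generated code stores it in XRef and reads the target through
// compiler.ReadInfoForRef; a map stands in for that here).
type node struct {
	Ref   string
	Title string
	Items *node
}

// resolve replaces reference nodes in place and recurses into children,
// mirroring the generated ResolveReferences methods.
func resolve(n *node, table map[string]*node) error {
	if n.Ref != "" {
		target, ok := table[n.Ref]
		if !ok {
			return errors.New("unresolvable $ref: " + n.Ref)
		}
		*n = *target             // in-place swap, like *m = *replacement
		return resolve(n, table) // the target may itself be a reference
	}
	if n.Items != nil {
		return resolve(n.Items, table)
	}
	return nil
}

func main() {
	table := map[string]*node{"#/definitions/Pet": {Title: "Pet"}}
	root := &node{Items: &node{Ref: "#/definitions/Pet"}}
	if err := resolve(root, table); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(root.Items.Title) // Pet
}
```

+ +func (m 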
*SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *StringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Tag) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +func (m *Xml) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 000000000..c815ae969 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,4456 @@ +// Code generated by protoc-gen-go. +// source: OpenAPIv2/OpenAPIv2.proto +// DO NOT EDIT! + +/* +Package openapi_v2 is a generated protocol buffer package. 
+ +It is generated from these files: + OpenAPIv2/OpenAPIv2.proto + +It has these top-level messages: + AdditionalPropertiesItem + Any + ApiKeySecurity + BasicAuthenticationSecurity + BodyParameter + Contact + Default + Definitions + Document + Examples + ExternalDocs + FileSchema + FormDataParameterSubSchema + Header + HeaderParameterSubSchema + Headers + Info + ItemsItem + JsonReference + License + NamedAny + NamedHeader + NamedParameter + NamedPathItem + NamedResponse + NamedResponseValue + NamedSchema + NamedSecurityDefinitionsItem + NamedString + NamedStringArray + NonBodyParameter + Oauth2AccessCodeSecurity + Oauth2ApplicationSecurity + Oauth2ImplicitSecurity + Oauth2PasswordSecurity + Oauth2Scopes + Operation + Parameter + ParameterDefinitions + ParametersItem + PathItem + PathParameterSubSchema + Paths + PrimitivesItems + Properties + QueryParameterSubSchema + Response + ResponseDefinitions + ResponseValue + Responses + Schema + SchemaItem + SecurityDefinitions + SecurityDefinitionsItem + SecurityRequirement + StringArray + Tag + TypeItem + VendorExtension + Xml +*/ +package openapi_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +}
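The hand-rolled oneof plumbing that follows encodes each branch with a protocol-buffer field key of (field_number << 3) | wire_type, which is why the marshaler writes 1<<3|proto.WireBytes before the embedded Schema and 2<<3|proto.WireVarint before the boolean. A small sketch of the key arithmetic:

```go
package main

import "fmt"

// Proto wire-format field keys, as used by the generated oneof
// marshaler: key = (field_number << 3) | wire_type.
const (
	wireVarint = 0 // e.g. the boolean branch
	wireBytes  = 2 // e.g. the embedded Schema message
)

func fieldKey(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("schema key:  0x%02x\n", fieldKey(1, wireBytes))  // 0x0a
	fmt.Printf("boolean key: 0x%02x\n", fieldKey(2, wireVarint)) // 0x10
}
```

+ +// XXX_OneofFuncs is for the internal use of the proto package. 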
+func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *AdditionalPropertiesItem_Boolean: + t := uint64(0) + if x.Boolean { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AdditionalPropertiesItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &AdditionalPropertiesItem_Schema{msg} + return true, err + case 2: // oneof.boolean + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} + return true, err + default: + return false, nil + } +} + +func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AdditionalPropertiesItem_Boolean: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Any struct { + Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Any) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} 
} + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +}
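Every getter in this file guards against a nil receiver, so optional sub-messages can be read, and even chained, without explicit nil checks. A self-contained sketch of the pattern with a hypothetical contact type:

```go
package main

import "fmt"

// contact is a hypothetical stand-in for the generated message types.
type contact struct{ name string }

// GetName mirrors the generated getter pattern: a nil receiver yields
// the zero value instead of a panic, which makes chained access safe.
func (m *contact) GetName() string {
	if m != nil {
		return m.name
	}
	return ""
}

func main() {
	var c *contact // deliberately nil
	fmt.Printf("%q\n", c.GetName()) // "" — no nil-pointer panic
}
```

+ +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. 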
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +}
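The Document message defined below is the root of the parsed OpenAPI v2 model. Assuming this vendored package is importable under its declared name openapi_v2, a root value can be built or inspected directly; the sketch uses only fields and getters that appear in this file:

```go
package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	// Hand-built root document; in practice these values come from
	// parsing a swagger.yaml through the generated New* constructors.
	doc := &openapi_v2.Document{
		Swagger:  "2.0",
		Host:     "petstore.example.com", // hypothetical host
		BasePath: "/v2",
		Schemes:  []string{"https"},
	}
	fmt.Println(doc.GetSwagger())     // "2.0"
	fmt.Println(doc.GetInfo() == nil) // true: Info was never set
}
```

+ +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + // A list of MIME types the API can produce. 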
+ Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } + return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + 
return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +}
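Each field of the parameter messages below carries a protobuf struct tag recording its wire type, field number, and JSON name, which the proto and JSON runtimes read via reflection. A small sketch of how such a tag can be inspected, using a hypothetical local type with the same tag syntax:

```go
package main

import (
	"fmt"
	"reflect"
)

// param is a hypothetical stand-in carrying the same tag layout as the
// generated fields.
type param struct {
	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
}

func main() {
	f, _ := reflect.TypeOf(param{}).FieldByName("Required")
	fmt.Println(f.Tag.Get("protobuf")) // varint,1,opt,name=required
	fmt.Println(f.Tag.Get("json"))     // required,omitempty
}
```

+ +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. 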
+ In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) 
GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + 
return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The terms of service for the API. 
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License { + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=ref" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` +} +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` +} +type 
NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` +} +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_FormDataParameterSubSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_QueryParameterSubSchema: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_PathParameterSubSchema: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NonBodyParameter) + switch tag { + case 1: // oneof.header_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HeaderParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} + return true, err + case 2: // oneof.form_data_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FormDataParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} + return true, err + case 3: // oneof.query_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} + return true, err + case 4: // oneof.path_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PathParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} + return true, err + default: + return false, nil + } +} + +func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + s := proto.Size(x.HeaderParameterSubSchema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_FormDataParameterSubSchema: + s := proto.Size(x.FormDataParameterSubSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_QueryParameterSubSchema: + s := proto.Size(x.QueryParameterSubSchema) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*NonBodyParameter_PathParameterSubSchema: + s := proto.Size(x.PathParameterSubSchema) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *Oauth2AccessCodeSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Oauth2ApplicationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} 
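+
+// Editorial sketch (hypothetical, not part of the generated gnostic
+// source): every generated getter above checks its receiver for nil
+// before dereferencing, so callers can chain through optional
+// sub-messages without intermediate nil checks. The variable names
+// below are illustrative only:
+//
+//	var doc *Document                  // may be nil, e.g. not yet decoded
+//	title := doc.GetInfo().GetTitle()  // "" when doc or doc.Info is nil
+//	for _, s := range doc.GetSchemes() { // nil slice when doc is nil
+//		_ = s
+//	}
+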
+ +func (m *Oauth2ApplicationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *Oauth2ImplicitSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *Oauth2PasswordSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2PasswordSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Operation) GetResponses() *Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + 
return m.Deprecated + } + return false +} + +func (m *Operation) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Operation) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Parameter struct { + // Types that are valid to be assigned to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` +} +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } +} + +func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BodyParameter); err != nil { + return err + } + case *Parameter_NonBodyParameter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NonBodyParameter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Parameter) + switch tag { + case 1: // oneof.body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_BodyParameter{msg} + return true, err + case 2: // oneof.non_body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonBodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_NonBodyParameter{msg} + return true, err + default: + return false, nil + } +} + +func _Parameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + s := proto.Size(x.BodyParameter) + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Parameter_NonBodyParameter: + s := proto.Size(x.NonBodyParameter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + // Types that are valid to be assigned to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` +} +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ParametersItem) GetParameter() *Parameter { + if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (m *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } +} + +func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Parameter); err != nil { + return err + } + case *ParametersItem_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ParametersItem) + switch tag { + case 1: // oneof.parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Parameter) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_Parameter{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ParametersItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + s := proto.Size(x.Parameter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ParametersItem_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PathItem struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=ref" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *PathItem) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *PathItem) GetGet() *Operation { + if m != nil { + return m.Get + } + return nil +} + +func (m *PathItem) GetPut() *Operation { + if m != nil { + return m.Put + } + return nil +} + +func (m *PathItem) GetPost() *Operation { + if m != nil { + return m.Post + } + return nil +} + +func (m *PathItem) GetDelete() *Operation { + if m != nil { + return m.Delete + } + return nil +} + +func (m *PathItem) GetOptions() *Operation { + if m != nil { + return m.Options + } + return nil +} + +func (m *PathItem) GetHead() *Operation { + if m != nil { + return m.Head + } + return nil +} + +func (m *PathItem) GetPatch() *Operation { + if m != nil { + return m.Patch + } + return nil +} + +func (m *PathItem) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *PathItem) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *PathParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *PathParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *PathParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PathParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PathParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PathParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PathParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PathParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PathParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PathParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + 
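+
+// Editorial sketch (hypothetical, not part of the generated gnostic
+// source): the Parameter/NonBodyParameter oneof wrappers defined
+// earlier are populated by assigning the concrete wrapper struct and
+// read back through the typed getters, which return nil when a
+// different oneof case is stored:
+//
+//	p := &Parameter{Oneof: &Parameter_NonBodyParameter{
+//		NonBodyParameter: &NonBodyParameter{},
+//	}}
+//	if nb := p.GetNonBodyParameter(); nb != nil {
+//		// nil unless the header case was stored inside nb
+//		_ = nb.GetHeaderParameterSubSchema()
+//	}
+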
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PathParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PathParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PathParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PathParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +type Paths struct { + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *Paths) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func (m *Paths) GetPath() []*NamedPathItem { + if m != nil { + return m.Path + } + return nil +} + +type PrimitivesItems struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *PrimitivesItems) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PrimitivesItems) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PrimitivesItems) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PrimitivesItems) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PrimitivesItems) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PrimitivesItems) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PrimitivesItems) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PrimitivesItems) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PrimitivesItems) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PrimitivesItems) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PrimitivesItems) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PrimitivesItems) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PrimitivesItems) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PrimitivesItems) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PrimitivesItems) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Properties struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *Properties) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *QueryParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *QueryParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *QueryParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QueryParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *QueryParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *QueryParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *QueryParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + 
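// Editor's sketch (not part of the vendored file): how the fields above
// fit together for an OpenAPI v2 "array" query parameter. The element
// type lives in Items (*PrimitivesItems) and the serialization of the
// repeated values in CollectionFormat, mirroring the Swagger 2.0
// `items` and `collectionFormat` keywords.
package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

func main() {
	q := &openapi_v2.QueryParameterSubSchema{
		Name:             "tags",
		In:               "query",
		Type:             "array",
		CollectionFormat: "csv",
		Items:            &openapi_v2.PrimitivesItems{Type: "string"},
	}

	// The nil-safe getters make the nested read safe even if Items were unset.
	fmt.Printf("%s: %s of %s, collectionFormat=%s\n",
		q.GetName(), q.GetType(), q.GetItems().GetType(), q.GetCollectionFormat())
	// Output: tags: array of string, collectionFormat=csv
}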
+func (m *QueryParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *QueryParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *QueryParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *QueryParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *QueryParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *QueryParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *QueryParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for parameters +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` +} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case *ResponseValue_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) + } + return nil +} + +func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseValue) + switch tag { + case 1: // oneof.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Response) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_Response{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseValue_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response objects names can either be any valid HTTP status code or 'default'. +type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=ref" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` + Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + 
+func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *SchemaItem_FileSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SchemaItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_Schema{msg} + return true, err + case 2: // oneof.file_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileSchema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_FileSchema{msg} + return true, err + default: + return false, nil + } +} + +func _SchemaItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*SchemaItem_FileSchema: + s := proto.Size(x.FileSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` +} +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} +func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_ApiKeySecurity: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + b.EncodeVarint(6<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecurityDefinitionsItem) + switch tag { + case 1: // oneof.basic_authentication_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicAuthenticationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} + return true, err + case 2: // oneof.api_key_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiKeySecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} + return true, err + case 3: // oneof.oauth2_implicit_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ImplicitSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} + return true, err + case 4: // oneof.oauth2_password_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2PasswordSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} + return true, err + case 5: // oneof.oauth2_application_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ApplicationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} + return true, err + case 6: // oneof.oauth2_access_code_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2AccessCodeSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} + return true, err + default: + return false, nil + } +} + +func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + s := proto.Size(x.BasicAuthenticationSecurity) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_ApiKeySecurity: + s := proto.Size(x.ApiKeySecurity) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + s := proto.Size(x.Oauth2ImplicitSecurity) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + s := proto.Size(x.Oauth2PasswordSecurity) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + s := proto.Size(x.Oauth2ApplicationSecurity) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + s := proto.Size(x.Oauth2AccessCodeSecurity) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityRequirement struct { + 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, + 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, + 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, + 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, + 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, + 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, + 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, + 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, + 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, + 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, + 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, + 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, + 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, + 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, + 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, + 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, + 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, + 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, + 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, + 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, + 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, + 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, + 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, + 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, + 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, + 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, + 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, + 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, + 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, + 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, + 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, + 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, + 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, + 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, + 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, + 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, + 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, + 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, + 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, + 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, + 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, + 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, + 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, + 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, + 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, + 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, + 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, + 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, + 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, + 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, + 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, + 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, + 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, + 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, + 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, + 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, + 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, + 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, + 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, + 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, + 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, + 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, + 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, + 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, + 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, + 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, + 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, + 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, + 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, + 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, + 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, + 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, + 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, + 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, + 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, + 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, + 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, + 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, + 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, + 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, + 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, + 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, + 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, + 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, + 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, + 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, + 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, + 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, + 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, + 0xb2, 0x8f, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, + 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, + 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, + 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, + 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, + 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, + 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, + 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, + 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, + 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, + 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, + 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, + 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, + 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, + 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, + 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, + 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, + 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, + 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, + 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, + 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, + 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, + 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, + 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, + 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, + 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, + 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, + 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, + 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, + 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, + 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, + 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, + 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, + 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, + 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, + 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, + 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, + 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, + 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, + 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, + 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, + 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, + 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, + 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, + 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, + 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, + 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, + 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, + 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, + 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, + 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, + 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, + 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, + 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, + 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, + 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, + 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, + 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, + 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, + 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, + 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, + 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, + 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, + 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, + 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, + 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, + 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, + 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, + 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, + 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, + 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, + 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, + 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, + 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, + 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, + 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, + 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, + 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, + 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, + 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, + 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, + 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, + 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, + 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, + 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, + 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, + 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, + 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, + 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, + 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, + 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, + 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, + 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, + 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, + 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, + 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, + 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, + 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, + 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, + 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, + 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0x3c, 0x01, 0x3f, 0x38, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 000000000..557c88072 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,663 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
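+// Editor's note (illustrative, not part of the vendored file): these Named*
+// wrappers model maps as repeated (name,value) pairs rather than proto3 map
+// fields, which do not guarantee key order. For example, the YAML fragment
+//   paths:
+//     /pets: {...}
+//     /pets/{id}: {...}
+// would become repeated NamedPathItem entries kept in source order.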
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
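+// Editor's note: per the schemesList definition in the companion
+// openapi-2.0.json later in this diff, the valid values are "http",
+// "https", "ws", and "wss".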
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
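+// Editor's note: in the companion JSON schema, allowEmptyValue is defined
+// only for query and formData parameters and defaults to false.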
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
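+// Editor's note (hypothetical example): a document property such as
+//   x-sample-extension: {...}
+// would be captured as a NamedAny whose name is "x-sample-extension"; the
+// key shown here is invented for illustration only.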
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 000000000..836fb32a7 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json new file mode 100644 index 000000000..2815a26ea --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." 
+ }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
+ }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + 
"type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 000000000..848b16c69 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This 
directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 000000000..2e5242ee7 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,41 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } else { + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} + } +} + +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." + context.Name + } else { + return context.Name + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 000000000..942536a79 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,59 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compiler + +// basic error type +type Error struct { + Context *Context + Message string +} + +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +func (err *Error) Error() string { + if err.Context != nil { + return "ERROR " + err.Context.Description() + " " + err.Message + } else { + return "ERROR " + err.Message + } +} + +// container for groups of errors +type ErrorGroup struct { + Errors []error +} + +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 000000000..426663c2b --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,99 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +type ExtensionHandler struct { + Name string +} + +func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) 
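+		// proto.Unmarshal failed; dump the raw plugin output to aid debugging.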
+ fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 000000000..56306c40f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,193 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strings" +) + +// compiler helper functions, usually called from generated code + +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, ok + } else { + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, ok + } else { + return nil, ok + } + } +} + +func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +func PatternMatches(pattern string, value string) bool { + // if pattern contains a subpattern like "{path}", replace it with ".*" + if pattern[0] != '^' { + subpatternPattern := regexp.MustCompile("^.*(\\{.*\\}).*$") + if matches := subpatternPattern.FindSubmatch([]byte(pattern)); matches != nil { + match := string(matches[1]) + pattern = strings.Replace(pattern, match, ".*", -1) + } + } + matched, err := regexp.Match(pattern, []byte(value)) + if err != nil { + panic(err) + } + return matched +} + +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []string) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := 
item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if PatternMatches(allowedPattern, key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// describe a map (for debugging purposes) +func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +func PluralProperties(count int) string { + if count == 1 { + return "property" + } else { + return "properties" + } +} + +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 000000000..9713a21cc --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 000000000..1878e0608 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,167 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +var file_cache map[string][]byte +var info_cache map[string]interface{} +var count int64 + +var VERBOSE_READER = false + +func initializeFileCache() { + if file_cache == nil { + file_cache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if info_cache == nil { + info_cache = make(map[string]interface{}, 0) + } +} + +func FetchFile(fileurl string) ([]byte, error) { + initializeFileCache() + bytes, ok := file_cache[fileurl] + if ok { + if VERBOSE_READER { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + log.Printf("Fetching %s", fileurl) + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } else { + defer response.Body.Close() + bytes, err := ioutil.ReadAll(response.Body) + if err == nil { + file_cache[fileurl] = bytes + } + return bytes, err + } +} + +// read a file and unmarshal it as a yaml.MapSlice +func ReadInfoForFile(filename string) (interface{}, error) { + initializeInfoCache() + info, ok := info_cache[filename] + if ok { + if VERBOSE_READER { + log.Printf("Cache hit info for file %s", filename) + } + return info, nil + } + if VERBOSE_READER { + log.Printf("Reading info for file %s", filename) + } + + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := FetchFile(filename) + if err != nil { + return nil, err + } + var info yaml.MapSlice + err = yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + info_cache[filename] = info + return info, nil + } else { + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + log.Printf("File error: %v\n", err) + return nil, err + } + var info yaml.MapSlice + err = yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + info_cache[filename] = info + return info, nil + } +} + +// read a file and return the fragment needed to resolve a $ref +func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + { + info, ok := info_cache[ref] + if ok { + if VERBOSE_READER { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + } + if VERBOSE_READER { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = basedir + parts[0] + } else { + filename = basefile + } + info, err := ReadInfoForFile(filename) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + info_cache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + info_cache[ref] = info + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh new file mode 100755 index 000000000..64badc403 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh @@ -0,0 +1,7 @@ +go get github.com/golang/protobuf/protoc-gen-go + +protoc \ 
+--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto + +go build +go install diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 000000000..ff1c2eb1e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 000000000..b14f1f945 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,219 @@ +// Code generated by protoc-gen-go. +// source: extension.proto +// DO NOT EDIT! + +/* +Package openapiextension_v1 is a generated protocol buffer package. + +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ +package openapiextension_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + // The version number of openapi compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + // text output + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. 
+ Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 355 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, + 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, + 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, + 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, + 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, + 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, + 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, + 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, + 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, + 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, + 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, + 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, + 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, + 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, + 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, + 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, + 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, + 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, + 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, + 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, + 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, + 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, + 
0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 000000000..806760a13 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,93 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +package openapiextension.v1; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIExtensionV1"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "OAE"; // "OpenAPI Extension" + +// The version number of OpenAPI compiler. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper wrapper = 1; + + // The version number of openapi compiler. + Version compiler_version = 3; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. 
+ repeated string error = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension + string extension_name = 2; + + // Must be a valid yaml for the proto + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 000000000..12800ac58 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,83 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapiextension_v1 + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "gopkg.in/yaml.v2" +) + +type documentHandler func(version string, extensionName string, document string) +type extensionHandler func(name string, info yaml.MapSlice) (bool, proto.Message, error) + +func forInputYamlFromOpenapic(handler documentHandler) { + data, err := ioutil.ReadAll(os.Stdin) + + if err != nil { + fmt.Println("File error:", err.Error()) + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) +} + +func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var info yaml.MapSlice + var newObject proto.Message + var err error + err = yaml.Unmarshal([]byte(yamlInput), &info) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + handled, newObject, err := handleExtension(extensionName, info) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes 
+*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml new file mode 100644 index 000000000..8687342e9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.1 + - 1.2 + - tip diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..b003eca0c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..9ad75a0f5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,59 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
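+
+### Example
+
+The documentation and examples linked above are the authoritative references.
+As a quick, unofficial sketch (the echo server URL here is a placeholder), a
+minimal client dials a server, sends one text message, and reads the echoed
+reply:
+
+    package main
+
+    import (
+        "log"
+
+        "github.com/gorilla/websocket"
+    )
+
+    func main() {
+        // Dial performs the opening handshake and returns a connection.
+        conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
+        if err != nil {
+            log.Fatal("dial:", err)
+        }
+        defer conn.Close()
+
+        // WriteMessage frames and sends a complete text message.
+        if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
+            log.Fatal("write:", err)
+        }
+
+        // ReadMessage returns the next complete data message from the peer.
+        _, msg, err := conn.ReadMessage()
+        if err != nil {
+            log.Fatal("read:", err)
+        }
+        log.Printf("received: %s", msg)
+    }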
+
+### Gorilla WebSocket compared with other packages
+
+<table>
+<tr>
+<th></th>
+<th>github.com/gorilla</th>
+<th>golang.org/x/net</th>
+</tr>
+<tr>
+<td colspan="3">RFC 6455 Features</td>
+</tr>
+<tr><td>Passes Autobahn Test Suite</td><td>Yes</td><td>No</td></tr>
+<tr><td>Receive fragmented message</td><td>Yes</td><td>No, see note 1</td></tr>
+<tr><td>Send close message</td><td>Yes</td><td>No</td></tr>
+<tr><td>Send pings and receive pongs</td><td>Yes</td><td>No</td></tr>
+<tr><td>Get the type of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
+<tr>
+<td colspan="3">Other Features</td>
+</tr>
+<tr><td>Limit size of received message</td><td>Yes</td><td>No</td></tr>
+<tr><td>Read message using io.Reader</td><td>Yes</td><td>No, see note 3</td></tr>
+<tr><td>Write message using io.WriteCloser</td><td>Yes</td><td>No, see note 3</td></tr>
+</table>
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..93db8ddc3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,269 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + acceptKey := computeAcceptKey(challengeKey) + + c = newConn(netConn, false, readBufSize, writeBufSize) + p := c.writeBuf[:0] + p = append(p, "GET "...) + p = append(p, u.RequestURI()...) + p = append(p, " HTTP/1.1\r\nHost: "...) + p = append(p, u.Host...) + // "Upgrade" is capitalized for servers that do not use case insensitive + // comparisons on header tokens. + p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...) + p = append(p, challengeKey...) + p = append(p, "\r\n"...) + for k, vs := range requestHeader { + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + p = append(p, v...) + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + if _, err := netConn.Write(p); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u}) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != acceptKey { + return nil, resp, ErrBadHandshake + } + c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + return c, resp, nil +} + +// A Dialer contains options for connecting to WebSocket server. 
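+// A nil *Dialer is valid: Dial treats it as equivalent to a zero-value Dialer.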
+type Dialer struct {
+	// NetDial specifies the dial function for creating TCP connections. If
+	// NetDial is nil, net.Dial is used.
+	NetDial func(network, addr string) (net.Conn, error)
+
+	// TLSClientConfig specifies the TLS configuration to use with tls.Client.
+	// If nil, the default configuration is used.
+	TLSClientConfig *tls.Config
+
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// Input and output buffer sizes. If the buffer size is zero, then a
+	// default value of 4096 is used.
+	ReadBufferSize, WriteBufferSize int
+
+	// Subprotocols specifies the client's requested subprotocols.
+	Subprotocols []string
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+// parseURL parses the URL. The url.Parse function is not used here because
+// url.Parse mangles the path.
+func parseURL(s string) (*url.URL, error) {
+	// From the RFC:
+	//
+	// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
+	// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
+	//
+	// We don't use the net/url parser here because the dialer interface does
+	// not provide a way for applications to work around percent decoding in
+	// the net/url parser.
+
+	var u url.URL
+	switch {
+	case strings.HasPrefix(s, "ws://"):
+		u.Scheme = "ws"
+		s = s[len("ws://"):]
+	case strings.HasPrefix(s, "wss://"):
+		u.Scheme = "wss"
+		s = s[len("wss://"):]
+	default:
+		return nil, errMalformedURL
+	}
+
+	u.Host = s
+	u.Opaque = "/"
+	if i := strings.Index(s, "/"); i >= 0 {
+		u.Host = s[:i]
+		u.Opaque = s[i:]
+	}
+
+	if strings.Contains(u.Host, "@") {
+		// WebSocket URIs do not contain user information.
+		return nil, errMalformedURL
+	}
+
+	return &u, nil
+}
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+	hostPort = u.Host
+	hostNoPort = u.Host
+	if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+		hostNoPort = hostNoPort[:i]
+	} else {
+		if u.Scheme == "wss" {
+			hostPort += ":443"
+		} else {
+			hostPort += ":80"
+		}
+	}
+	return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default zero values.
+var DefaultDialer *Dialer
+
+// Dial creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + u, err := parseURL(urlStr) + if err != nil { + return nil, nil, err + } + + hostPort, hostNoPort := hostPortNoPort(u) + + if d == nil { + d = &Dialer{} + } + + var deadline time.Time + if d.HandshakeTimeout != 0 { + deadline = time.Now().Add(d.HandshakeTimeout) + } + + netDial := d.NetDial + if netDial == nil { + netDialer := &net.Dialer{Deadline: deadline} + netDial = netDialer.Dial + } + + netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if err := netConn.SetDeadline(deadline); err != nil { + return nil, nil, err + } + + if u.Scheme == "wss" { + cfg := d.TLSClientConfig + if cfg == nil { + cfg = &tls.Config{ServerName: hostNoPort} + } else if cfg.ServerName == "" { + shallowCopy := *cfg + cfg = &shallowCopy + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + if err := tlsConn.Handshake(); err != nil { + return nil, nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return nil, nil, err + } + } + } + + if len(d.Subprotocols) > 0 { + h := http.Header{} + for k, v := range requestHeader { + h[k] = v + } + h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", ")) + requestHeader = h + } + + if len(requestHeader["Host"]) > 0 { + // This can be used to supply a Host: header which is different from + // the dial address. + u.Host = requestHeader.Get("Host") + + // Drop "Host" header + h := http.Header{} + for k, v := range requestHeader { + if k == "Host" { + continue + } + h[k] = v + } + requestHeader = h + } + + conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize) + + if err != nil { + if err == ErrBadHandshake { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + } + return nil, resp, err + } + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..e719f1ce6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,825 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "time" +) + +const ( + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + finalBit = 1 << 7 + maskBit = 1 << 7 + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. 
+const (
+	CloseNormalClosure           = 1000
+	CloseGoingAway               = 1001
+	CloseProtocolError           = 1002
+	CloseUnsupportedData         = 1003
+	CloseNoStatusReceived        = 1005
+	CloseAbnormalClosure         = 1006
+	CloseInvalidFramePayloadData = 1007
+	ClosePolicyViolation         = 1008
+	CloseMessageTooBig           = 1009
+	CloseMandatoryExtension      = 1010
+	CloseInternalServerErr       = 1011
+	CloseTLSHandshake            = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+	// TextMessage denotes a text data message. The text message payload is
+	// interpreted as UTF-8 encoded text data.
+	TextMessage = 1
+
+	// BinaryMessage denotes a binary data message.
+	BinaryMessage = 2
+
+	// CloseMessage denotes a close control message. The optional message
+	// payload contains a numeric code and text. Use the FormatCloseMessage
+	// function to format a close message payload.
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+	msg       string
+	temporary bool
+	timeout   bool
+}
+
+func (e *netError) Error() string   { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool   { return e.timeout }
+
+// closeError represents a close frame.
+type closeError struct {
+	code int
+	text string
+}
+
+func (e *closeError) Error() string {
+	return "websocket: close " + strconv.Itoa(e.code) + " " + e.text
+}
+
+var (
+	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true}
+	errUnexpectedEOF       = &closeError{code: CloseAbnormalClosure, text: io.ErrUnexpectedEOF.Error()}
+	errBadWriteOpCode      = errors.New("websocket: bad write message type")
+	errWriteClosed         = errors.New("websocket: write closed")
+	errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func hideTempErr(err error) error {
+	if e, ok := err.(net.Error); ok && e.Temporary() {
+		err = &netError{msg: e.Error(), timeout: e.Timeout()}
+	}
+	return err
+}
+
+func isControl(frameType int) bool {
+	return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+	return frameType == TextMessage || frameType == BinaryMessage
+}
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+	return pos & 3
+}
+
+func newMaskKey() [4]byte {
+	n := rand.Uint32()
+	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+// Conn represents a WebSocket connection.
+type Conn struct {
+	conn        net.Conn
+	isServer    bool
+	subprotocol string
+
+	// Write fields
+	mu        chan bool // used as mutex to protect write to conn and closeSent
+	closeSent bool      // true if close message was sent
+
+	// Message writer fields.
+	writeErr       error
+	writeBuf       []byte // frame is constructed in this buffer.
+	writePos       int    // end of data in writeBuf.
+	writeFrameType int    // type of the current frame.
+	writeSeq       int    // incremented to invalidate message writers.
+	writeDeadline time.Time
+
+	// Read fields
+	readErr       error
+	br            *bufio.Reader
+	readRemaining int64 // bytes remaining in current frame.
+	readFinal     bool  // true if the final frame of the current message has been read.
+	readSeq       int   // incremented to invalidate message readers.
+	readLength    int64 // Message size.
+	readLimit     int64 // Maximum message size.
+	readMaskPos   int
+	readMaskKey   [4]byte
+	handlePong    func(string) error
+	handlePing    func(string) error
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
+	mu := make(chan bool, 1)
+	mu <- true
+
+	if readBufferSize == 0 {
+		readBufferSize = defaultReadBufferSize
+	}
+	if writeBufferSize == 0 {
+		writeBufferSize = defaultWriteBufferSize
+	}
+
+	c := &Conn{
+		isServer:       isServer,
+		br:             bufio.NewReaderSize(conn, readBufferSize),
+		conn:           conn,
+		mu:             mu,
+		readFinal:      true,
+		writeBuf:       make([]byte, writeBufferSize+maxFrameHeaderSize),
+		writeFrameType: noFrame,
+		writePos:       maxFrameHeaderSize,
+	}
+	c.SetPingHandler(nil)
+	c.SetPongHandler(nil)
+	return c
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+	return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting for a close frame.
+func (c *Conn) Close() error {
+	return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
+	<-c.mu
+	defer func() { c.mu <- true }()
+
+	if c.closeSent {
+		return ErrCloseSent
+	} else if frameType == CloseMessage {
+		c.closeSent = true
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	for _, buf := range bufs {
+		if len(buf) > 0 {
+			n, err := c.conn.Write(buf)
+			if n != len(buf) {
+				// Close on partial write.
+				c.conn.Close()
+			}
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+	if !isControl(messageType) {
+		return errBadWriteOpCode
+	}
+	if len(data) > maxControlFramePayloadSize {
+		return errInvalidControlFrame
+	}
+
+	b0 := byte(messageType) | finalBit
+	b1 := byte(len(data))
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+	buf = append(buf, b0, b1)
+
+	if c.isServer {
+		buf = append(buf, data...)
+	} else {
+		key := newMaskKey()
+		buf = append(buf, key[:]...)
+		buf = append(buf, data...)
+		maskBytes(key, 0, buf[6:])
+	}
+
+	d := time.Hour * 1000
+	if !deadline.IsZero() {
+		d = deadline.Sub(time.Now())
+		if d < 0 {
+			return errWriteTimeout
+		}
+	}
+
+	timer := time.NewTimer(d)
+	select {
+	case <-c.mu:
+		timer.Stop()
+	case <-timer.C:
+		return errWriteTimeout
+	}
+	defer func() { c.mu <- true }()
+
+	if c.closeSent {
+		return ErrCloseSent
+	} else if messageType == CloseMessage {
+		c.closeSent = true
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	n, err := c.conn.Write(buf)
+	if n != 0 && n != len(buf) {
+		c.conn.Close()
+	}
+	return err
+}
+
+// NextWriter returns a writer for the next message to send. The writer's
+// Close method flushes the complete message to the network.
+// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// The NextWriter method and the writers returned from the method cannot be +// accessed by more than one goroutine at a time. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if c.writeErr != nil { + return nil, c.writeErr + } + + if c.writeFrameType != noFrame { + if err := c.flushFrame(true, nil); err != nil { + return nil, err + } + } + + if !isControl(messageType) && !isData(messageType) { + return nil, errBadWriteOpCode + } + + c.writeFrameType = messageType + return messageWriter{c, c.writeSeq}, nil +} + +func (c *Conn) flushFrame(final bool, extra []byte) error { + length := c.writePos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(c.writeFrameType) && + (!final || length > maxControlFramePayloadSize) { + c.writeSeq++ + c.writeFrameType = noFrame + c.writePos = maxFrameHeaderSize + return errInvalidControlFrame + } + + b0 := byte(c.writeFrameType) + if final { + b0 |= finalBit + } + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + if len(extra) > 0 { + c.writeErr = errors.New("websocket: internal error, extra used in client mode") + return c.writeErr + } + } + + // Write the buffers to the connection. + c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + + // Setup for next frame. + c.writePos = maxFrameHeaderSize + c.writeFrameType = continuationFrame + if final { + c.writeSeq++ + c.writeFrameType = noFrame + } + return c.writeErr +} + +type messageWriter struct { + c *Conn + seq int +} + +func (w messageWriter) err() error { + c := w.c + if c.writeSeq != w.seq { + return errWriteClosed + } + if c.writeErr != nil { + return c.writeErr + } + return nil +} + +func (w messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.c.writePos + if n <= 0 { + if err := w.c.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.c.writePos + } + if n > max { + n = max + } + return n, nil +} + +func (w messageWriter) write(final bool, p []byte) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
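+		// Hand p to flushFrame as the frame payload ("extra") instead of
+		// copying it through writeBuf. This path is server-only because
+		// client frames must be masked in writeBuf before they are sent.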
+ err := w.c.flushFrame(final, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) Write(p []byte) (int, error) { + return w.write(false, p) +} + +func (w messageWriter) WriteString(p string) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if err := w.err(); err != nil { + return 0, err + } + for { + if w.c.writePos == len(w.c.writeBuf) { + err = w.c.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.c.writePos:]) + w.c.writePos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w messageWriter) Close() error { + if err := w.err(); err != nil { + return err + } + return w.c.flushFrame(true, nil) +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + wr, err := c.NextWriter(messageType) + if err != nil { + return err + } + w := wr.(messageWriter) + if _, err := w.write(true, data); err != nil { + return err + } + if c.writeSeq == w.seq { + if err := c.flushFrame(true, nil); err != nil { + return err + } + } + return nil +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +// readFull is like io.ReadFull except that io.EOF is never returned. +func (c *Conn) readFull(p []byte) (err error) { + var n int + for n < len(p) && err == nil { + var nn int + nn, err = c.br.Read(p[n:]) + n += nn + } + if n == len(p) { + err = nil + } else if err == io.EOF { + err = errUnexpectedEOF + } + return +} + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
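+	// (Per RFC 6455 section 5.2: byte 0 carries the FIN bit, the reserved
+	// bits and the opcode; byte 1 carries the MASK bit and a 7-bit payload
+	// length, where the values 126 and 127 select the 16- and 64-bit
+	// extended length forms read in step 3 below.)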
+ + var b [8]byte + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + + final := b[0]&finalBit != 0 + frameType := int(b[0] & 0xf) + reserved := int((b[0] >> 4) & 0x7) + mask := b[1]&maskBit != 0 + c.readRemaining = int64(b[1] & 0x7f) + + if reserved != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) + case 127: + if err := c.readFull(b[:8]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + if err := c.readFull(c.readMaskKey[:]); err != nil { + return noFrame, err + } + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload = make([]byte, c.readRemaining) + c.readRemaining = 0 + if err := c.readFull(payload); err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait)) + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + closeText = string(payload[2:]) + } + switch closeCode { + case CloseNormalClosure, CloseGoingAway: + return noFrame, io.EOF + default: + return noFrame, &closeError{code: closeCode, text: closeText} + } + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. 
NextReader discards +// the previous message if the application has not already consumed it. +// +// The NextReader method and the readers returned from the method cannot be +// accessed by more than one goroutine at a time. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + + c.readSeq++ + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + return frameType, messageReader{c, c.readSeq}, nil + } + } + return noFrame, nil, c.readErr +} + +type messageReader struct { + c *Conn + seq int +} + +func (r messageReader) Read(b []byte) (int, error) { + + if r.seq != r.c.readSeq { + return 0, io.EOF + } + + for r.c.readErr == nil { + + if r.c.readRemaining > 0 { + if int64(len(b)) > r.c.readRemaining { + b = b[:r.c.readRemaining] + } + n, err := r.c.br.Read(b) + r.c.readErr = hideTempErr(err) + if r.c.isServer { + r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n]) + } + r.c.readRemaining -= int64(n) + return n, r.c.readErr + } + + if r.c.readFinal { + r.c.readSeq++ + return 0, io.EOF + } + + frameType, err := r.c.advanceFrame() + switch { + case err != nil: + r.c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := r.c.readErr + if err == io.EOF && r.seq == r.c.readSeq { + err = errUnexpectedEOF + } + return 0, err +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close frame to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The default ping handler sends a pong to the peer. +func (c *Conn) SetPingHandler(h func(string) error) { + if h == nil { + h = func(message string) error { + c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + return nil + } + } + c.handlePing = h +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The default pong handler does nothing. +func (c *Conn) SetPongHandler(h func(string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to make +// further modifications to connection-specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message.
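+// The result is the 2-byte big-endian close code followed by the optional +// UTF-8 encoded text.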
+func FormatCloseMessage(closeCode int, text string) []byte { + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..f52925dd1 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,148 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application uses +// the Upgrade function from an Upgrader object with an HTTP request handler +// to get a pointer to a Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// return +// } +// if err = conn.WriteMessage(messageType, p); err != nil { +// return +// } +// } +// +// In the above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader +// methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return +// } +// if _, err := io.Copy(w, r); err != nil { +// return +// } +// if err := w.Close(); err != nil { +// return +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer.
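+// +// For example, assuming an open connection conn, a ping with an arbitrary +// payload and a short write deadline could be sent with: +// +//  err := conn.WriteControl(websocket.PingMessage, []byte("keepalive"), +//      time.Now().Add(time.Second))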
+// +// Connections handle received ping and pong messages by invoking a callback +// function set with SetPingHandler and SetPongHandler methods. These callback +// functions can be invoked from the ReadMessage method, the NextReader method +// or from a call to the data message reader returned from NextReader. +// +// Connections handle received close messages by returning an error from the +// ReadMessage method, the NextReader method or from a call to the data message +// reader returned from NextReader. +// +// Concurrency +// +// Connections do not support concurrent calls to the write methods +// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read +// methods (NextReader, SetReadDeadline, ReadMessage). Connections do +// support a concurrent reader and writer. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Read is Required +// +// The application must read the connection to process ping and close messages +// sent from the peer. If the application is not otherwise interested in +// messages from the peer, then the application should start a goroutine to read +// and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Origin Considerations +// +// Web browsers allow JavaScript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and not equal to the +// Host request header. +// +// An application can allow connections from any origin by specifying a +// function that always returns true: +// +// var upgrader = websocket.Upgrader{ +// CheckOrigin: func(r *http.Request) bool { return true }, +// } +// +// The deprecated Upgrade function does not enforce an origin policy. It's the +// application's responsibility to check the Origin header before calling +// Upgrade. +package websocket diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..18e62f225 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON is deprecated, use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v to the connection. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON is deprecated, use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // Decode returns io.EOF when the message is empty or all whitespace. + // Convert to io.ErrUnexpectedEOF so that application can distinguish + // between an error reading the JSON value and the connection closing. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..e56a00493 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,247 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a default value of 4096 is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is set, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, the host in the Origin header must not be set or + // must match the host of the request. + CheckOrigin func(r *http.Request) bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
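+// It is the default origin check used by Upgrade when Upgrader.CheckOrigin +// is nil.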
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return u.Host == r.Host +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-Websocket-Protocol). +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + var ( + netConn net.Conn + br *bufio.Reader + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var rw *bufio.ReadWriter + netConn, rw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + br = rw.Reader + + if br.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) + c.subprotocol = subprotocol + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. 
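+ // (net/http may have applied Server.ReadTimeout and Server.WriteTimeout as + // deadlines on the connection; after the hijack, the websocket connection + // manages its own deadlines.)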
+ netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// This function is deprecated, use websocket.Upgrader instead. +// +// The application is responsible for checking the request origin before +// calling Upgrade. An example implementation of the same origin policy is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..ffdc265ed --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,44 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" +) + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains token. 
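+// Matching is case-insensitive, and each header value may itself contain a +// comma-separated list of tokens.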
+func tokenListContainsValue(header http.Header, name string, value string) bool { + for _, v := range header[name] { + for _, s := range strings.Split(v, ",") { + if strings.EqualFold(value, strings.TrimSpace(s)) { + return true + } + } + } + return false +} + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} diff --git a/vendor/github.com/howeyc/gopass/pass.go b/vendor/github.com/howeyc/gopass/pass.go index 31c853ae2..f5bd5a51a 100644 --- a/vendor/github.com/howeyc/gopass/pass.go +++ b/vendor/github.com/howeyc/gopass/pass.go @@ -7,9 +7,14 @@ import ( "os" ) -var defaultGetCh = func() (byte, error) { +type FdReader interface { + io.Reader + Fd() uintptr +} + +var defaultGetCh = func(r io.Reader) (byte, error) { buf := make([]byte, 1) - if n, err := os.Stdin.Read(buf); n == 0 || err != nil { + if n, err := r.Read(buf); n == 0 || err != nil { if err != nil { return 0, err } @@ -28,9 +33,10 @@ var ( ) // getPasswd returns the input read from terminal. +// If prompt is not empty, it will be output as a prompt to the user // If masked is true, typing will be matched by asterisks on the screen. // Otherwise, typing will echo nothing. -func getPasswd(masked bool) ([]byte, error) { +func getPasswd(prompt string, masked bool, r FdReader, w io.Writer) ([]byte, error) { var err error var pass, bs, mask []byte if masked { @@ -38,26 +44,33 @@ func getPasswd(masked bool) ([]byte, error) { mask = []byte("*") } - if isTerminal(os.Stdin.Fd()) { - if oldState, err := makeRaw(os.Stdin.Fd()); err != nil { + if isTerminal(r.Fd()) { + if oldState, err := makeRaw(r.Fd()); err != nil { return pass, err } else { - defer restore(os.Stdin.Fd(), oldState) + defer func() { + restore(r.Fd(), oldState) + fmt.Fprintln(w) + }() } } + if prompt != "" { + fmt.Fprint(w, prompt) + } + // Track total bytes read, not just bytes in the password. This ensures any // errors that might flood the console with nil or -1 bytes infinitely are // capped. var counter int for counter = 0; counter <= maxLength; counter++ { - if v, e := getch(); e != nil { + if v, e := getch(r); e != nil { err = e break } else if v == 127 || v == 8 { if l := len(pass); l > 0 { pass = pass[:l-1] - fmt.Print(string(bs)) + fmt.Fprint(w, string(bs)) } } else if v == 13 || v == 10 { break @@ -66,7 +79,7 @@ func getPasswd(masked bool) ([]byte, error) { break } else if v != 0 { pass = append(pass, v) - fmt.Print(string(mask)) + fmt.Fprint(w, string(mask)) } } @@ -74,18 +87,24 @@ func getPasswd(masked bool) ([]byte, error) { err = ErrMaxLengthExceeded } - fmt.Println() return pass, err } // GetPasswd returns the password read from the terminal without echoing input. // The returned byte array does not include end-of-line characters. func GetPasswd() ([]byte, error) { - return getPasswd(false) + return getPasswd("", false, os.Stdin, os.Stdout) } // GetPasswdMasked returns the password read from the terminal, echoing asterisks. // The returned byte array does not include end-of-line characters. 
func GetPasswdMasked() ([]byte, error) { - return getPasswd(true) + return getPasswd("", true, os.Stdin, os.Stdout) +} + +// GetPasswdPrompt prompts the user and returns the password read from the terminal. +// If mask is true, then asterisks are echoed. +// The returned byte array does not include end-of-line characters. +func GetPasswdPrompt(prompt string, mask bool, r FdReader, w io.Writer) ([]byte, error) { + return getPasswd(prompt, mask, r, w) } diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go index 3ef32fbcc..1c3f25b2e 100644 --- a/vendor/github.com/juju/ratelimit/ratelimit.go +++ b/vendor/github.com/juju/ratelimit/ratelimit.go @@ -2,7 +2,7 @@ // Licensed under the LGPLv3 with static-linking exception. // See LICENCE file for details. -// The ratelimit package provides an efficient token bucket implementation +// Package ratelimit provides an efficient token bucket implementation // that can be used to limit the rate of arbitrary things. // See http://en.wikipedia.org/wiki/Token_bucket. package ratelimit @@ -21,6 +21,7 @@ type Bucket struct { capacity int64 quantum int64 fillInterval time.Duration + clock Clock // The mutex guards the fields following it. mu sync.Mutex @@ -33,12 +34,37 @@ type Bucket struct { availTick int64 } +// Clock is used to inject testable fakes. +type Clock interface { + Now() time.Time + Sleep(d time.Duration) +} + +// realClock implements Clock in terms of standard time functions. +type realClock struct{} + +// Now is identical to time.Now. +func (realClock) Now() time.Time { + return time.Now() +} + +// Sleep is identical to time.Sleep. +func (realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + // NewBucket returns a new token bucket that fills at the // rate of one token every fillInterval, up to the given // maximum capacity. Both arguments must be // positive. The bucket is initially full. func NewBucket(fillInterval time.Duration, capacity int64) *Bucket { - return NewBucketWithQuantum(fillInterval, capacity, 1) + return NewBucketWithClock(fillInterval, capacity, realClock{}) +} + +// NewBucketWithClock is identical to NewBucket but injects a testable clock +// interface. +func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket { + return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock) } // rateMargin specifies the allowed variance of actual @@ -51,12 +77,18 @@ const rateMargin = 0.01 // at high rates, the actual rate may be up to 1% different from the // specified rate. func NewBucketWithRate(rate float64, capacity int64) *Bucket { + return NewBucketWithRateAndClock(rate, capacity, realClock{}) +} + +// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a +// testable clock interface. +func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket { for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) { fillInterval := time.Duration(1e9 * float64(quantum) / rate) if fillInterval <= 0 { continue } - tb := NewBucketWithQuantum(fillInterval, capacity, quantum) + tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock) if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin { return tb } @@ -79,6 +111,12 @@ func nextQuantum(q int64) int64 { // the specification of the quantum size - quantum tokens // are added every fillInterval.
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket { + return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{}) +} + +// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects +// a testable clock interface. +func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket { if fillInterval <= 0 { panic("token bucket fill interval is not > 0") } @@ -89,7 +127,8 @@ func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) * panic("token bucket quantum is not > 0") } return &Bucket{ - startTime: time.Now(), + clock: clock, + startTime: clock.Now(), capacity: capacity, quantum: quantum, avail: capacity, @@ -101,7 +140,7 @@ func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) * // available. func (tb *Bucket) Wait(count int64) { if d := tb.Take(count); d > 0 { - time.Sleep(d) + tb.clock.Sleep(d) } } @@ -113,7 +152,7 @@ func (tb *Bucket) Wait(count int64) { func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool { d, ok := tb.TakeMaxDuration(count, maxWait) if d > 0 { - time.Sleep(d) + tb.clock.Sleep(d) } return ok } @@ -127,7 +166,7 @@ const infinityDuration time.Duration = 0x7fffffffffffffff // Note that if the request is irrevocable - there is no way to return // tokens to the bucket once this method commits us to taking them. func (tb *Bucket) Take(count int64) time.Duration { - d, _ := tb.take(time.Now(), count, infinityDuration) + d, _ := tb.take(tb.clock.Now(), count, infinityDuration) return d } @@ -141,14 +180,14 @@ func (tb *Bucket) Take(count int64) time.Duration { // wait until the tokens are actually available, and reports // true. func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) { - return tb.take(time.Now(), count, maxWait) + return tb.take(tb.clock.Now(), count, maxWait) } // TakeAvailable takes up to count immediately available tokens from the // bucket. It returns the number of tokens removed, or zero if there are // no available tokens. It does not block. func (tb *Bucket) TakeAvailable(count int64) int64 { - return tb.takeAvailable(time.Now(), count) + return tb.takeAvailable(tb.clock.Now(), count) } // takeAvailable is the internal version of TakeAvailable - it takes the @@ -178,7 +217,7 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 { // tokens could have changed in the meantime. This method is intended // primarily for metrics reporting and debugging. 
func (tb *Bucket) Available() int64 { - return tb.available(time.Now()) + return tb.available(tb.clock.Now()) } // available is the internal version of available - it takes the current time as diff --git a/vendor/github.com/mitchellh/go-ps/.gitignore b/vendor/github.com/mitchellh/go-ps/.gitignore new file mode 100644 index 000000000..a977916f6 --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/.gitignore @@ -0,0 +1 @@ +.vagrant/ diff --git a/vendor/github.com/mitchellh/go-ps/.travis.yml b/vendor/github.com/mitchellh/go-ps/.travis.yml new file mode 100644 index 000000000..8f794f71d --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/.travis.yml @@ -0,0 +1,4 @@ +language: go + +go: + - 1.2.1 diff --git a/vendor/github.com/mitchellh/go-ps/LICENSE.md b/vendor/github.com/mitchellh/go-ps/LICENSE.md new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-ps/README.md b/vendor/github.com/mitchellh/go-ps/README.md new file mode 100644 index 000000000..8e8baf9d2 --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/README.md @@ -0,0 +1,34 @@ +# Process List Library for Go + +go-ps is a library for Go that implements OS-specific APIs to list and +manipulate processes in a platform-safe way. The library can find and +list processes on Linux, Mac OS X, Solaris, and Windows. + +If you're new to Go, this library has a good amount of advanced Go educational +value as well. It uses some advanced features of Go: build tags, accessing +DLL methods for Windows, cgo for Darwin, etc. + +How it works: + + * **Darwin** uses the `sysctl` syscall to retrieve the process table. + * **Unix** uses the procfs at `/proc` to inspect the process tree. + * **Windows** uses the Windows API, and methods such as + `CreateToolhelp32Snapshot` to get a point-in-time snapshot of + the process table. + +## Installation + +Install using standard `go get`: + +``` +$ go get github.com/mitchellh/go-ps +... +``` + +## TODO + +Want to contribute? 
Here is a short TODO list of things that aren't +implemented for this library that would be nice: + + * FreeBSD support + * Plan9 support diff --git a/vendor/github.com/mitchellh/go-ps/Vagrantfile b/vendor/github.com/mitchellh/go-ps/Vagrantfile new file mode 100644 index 000000000..61662ab1e --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/Vagrantfile @@ -0,0 +1,43 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "chef/ubuntu-12.04" + + config.vm.provision "shell", inline: $script + + ["vmware_fusion", "vmware_workstation"].each do |p| + config.vm.provider p do |v| + v.vmx["memsize"] = "1024" + v.vmx["numvcpus"] = "2" + v.vmx["cpuid.coresPerSocket"] = "1" + end + end +end + +$script = <