Merge changes from upstream.

rnburn 2017-10-23 18:05:08 -07:00
commit c26af267c5
1185 changed files with 289703 additions and 14364 deletions

.gitignore (vendored), 2 changed lines

@@ -28,3 +28,5 @@ Session.vim
 # coverage artifacts
 .coverprofile
 /gover.coverprofile
+e2e-tests


@@ -13,6 +13,12 @@ go:
 go_import_path: k8s.io/ingress-nginx
+env:
+  global:
+    - CHANGE_MINIKUBE_NONE_USER=true
+    - KUBERNETES_VERSION=v1.7.5
+    - DOCKER=docker
 jobs:
   include:
     - stage: Static Check
@@ -25,3 +31,9 @@ jobs:
        - go get github.com/modocache/gover
        - if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
        - make cover
+    - stage: e2e
+      before_script:
+        - make e2e-image
+        - test/e2e/up.sh
+      script:
+        - make e2e-test


@@ -17,12 +17,12 @@ Follow either of the two links above to access the appropriate CLA and instructi
 ## Finding Things That Need Help
-If you're new to the project and want to help, but don't know where to start, we have a semi-curated list of issues that should not need deep knowledge of the system. [Have a look and see if anything sounds interesting](https://github.com/kubernetes/ingress/issues?utf8=%E2%9C%93&q=is%3Aopen%20is%3Aissue%20label%3A%22help+wanted%22). Alternatively, read some of the docs on other controllers and try to write your own, file and fix any/all issues that come up, including gaps in documentation!
+If you're new to the project and want to help, but don't know where to start, we have a semi-curated list of issues that should not need deep knowledge of the system. [Have a look and see if anything sounds interesting](https://github.com/kubernetes/ingress-nginx/issues?utf8=%E2%9C%93&q=is%3Aopen%20is%3Aissue%20label%3A%22help+wanted%22). Alternatively, read some of the docs on other controllers and try to write your own, file and fix any/all issues that come up, including gaps in documentation!
 ## Contributing a Patch
 1. If you haven't already done so, sign a Contributor License Agreement (see details above).
-1. Read the [Ingress development guide](docs/dev/README.md).
+1. Read the [Ingress development guide](docs/development.md).
 1. Fork the desired repo, develop and test your code changes.
 1. Submit a pull request.

Gopkg.lock (generated), 134 changed lines

@@ -1,6 +1,18 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+[[projects]]
+  name = "cloud.google.com/go"
+  packages = ["compute/metadata"]
+  revision = "eaddaf6dd7ee35fd3c2420c8d27478db176b0485"
+  version = "v0.15.0"
+[[projects]]
+  name = "github.com/Azure/go-autorest"
+  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
+  revision = "7aa5b8a6f18b5c15910c767ab005fc4585221177"
+  version = "v9.1.1"
 [[projects]]
   name = "github.com/PuerkitoBio/purell"
   packages = ["."]
@@ -34,8 +46,8 @@
 [[projects]]
   name = "github.com/dgrijalva/jwt-go"
   packages = ["."]
-  revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
-  version = "v3.0.0"
+  revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
+  version = "v3.1.0"
 [[projects]]
   name = "github.com/docker/distribution"
@@ -54,12 +66,6 @@
   revision = "5741799b275a3c4a5a9623a993576d7545cf7b5c"
   version = "v2.4.0"
-[[projects]]
-  name = "github.com/emicklei/go-restful-swagger12"
-  packages = ["."]
-  revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
-  version = "1.0.1"
 [[projects]]
   branch = "master"
   name = "github.com/fullsailor/pkcs7"
@@ -88,7 +94,7 @@
   branch = "master"
   name = "github.com/go-openapi/spec"
   packages = ["."]
-  revision = "48c2a7185575f9103a5a3863eff950bb776899d2"
+  revision = "84b5bee7bcb76f3d17bcbaf421bac44bd5709ca6"
 [[projects]]
   branch = "master"
@@ -99,8 +105,8 @@
 [[projects]]
   name = "github.com/gogo/protobuf"
   packages = ["gogoproto","proto","protoc-gen-gogo/descriptor","sortkeys"]
-  revision = "100ba4e885062801d56799d78530b73b178a78f3"
-  version = "v0.4"
+  revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
+  version = "v0.5"
 [[projects]]
   branch = "master"
@@ -138,6 +144,12 @@
   revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
   version = "v0.1.0"
+[[projects]]
+  branch = "master"
+  name = "github.com/gophercloud/gophercloud"
+  packages = [".","openstack","openstack/identity/v2/tenants","openstack/identity/v2/tokens","openstack/identity/v3/tokens","openstack/utils","pagination"]
+  revision = "443743e88335413103dcf1997e46d401b264fbcd"
 [[projects]]
   branch = "master"
   name = "github.com/gregjones/httpcache"
@@ -159,14 +171,14 @@
 [[projects]]
   name = "github.com/imdario/mergo"
   packages = ["."]
-  revision = "3e95a51e0639b4cf372f2ccf74c86749d747fbdc"
-  version = "0.2.2"
+  revision = "7fe0c75c13abdee74b09fcacef5ea1c6bba6a874"
+  version = "0.2.4"
 [[projects]]
   name = "github.com/json-iterator/go"
   packages = ["."]
-  revision = "6ed27152e0428abfde127acb33b08b03a1e67cac"
-  version = "1.0.2"
+  revision = "6240e1e7983a85228f7fd9c3e1b6932d46ec58e2"
+  version = "1.0.3"
 [[projects]]
   branch = "master"
@@ -190,7 +202,7 @@
   branch = "master"
   name = "github.com/mailru/easyjson"
   packages = ["buffer","jlexer","jwriter"]
-  revision = "2a92e673c9a6302dd05c3a691ae1f24aef46457d"
+  revision = "efb36c3268025336c3cdd805e3be5f88d1f62b73"
 [[projects]]
   name = "github.com/matttproud/golang_protobuf_extensions"
@@ -208,7 +220,13 @@
   branch = "master"
   name = "github.com/mitchellh/mapstructure"
   packages = ["."]
-  revision = "d0303fe809921458f417bcf828397a65db30a7e4"
+  revision = "06020f85339e21b2478f756a78e295255ffa4d6a"
+[[projects]]
+  branch = "master"
+  name = "github.com/moul/http2curl"
+  packages = ["."]
+  revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d"
 [[projects]]
   branch = "master"
@@ -217,12 +235,30 @@
   revision = "5917bc766b95a1fa3c2ae85340f4de02a6b7e15e"
   source = "github.com/aledbf/process-exporter"
+[[projects]]
+  name = "github.com/onsi/ginkgo"
+  packages = [".","config","internal/codelocation","internal/containernode","internal/failer","internal/leafnodes","internal/remote","internal/spec","internal/spec_iterator","internal/specrunner","internal/suite","internal/testingtproxy","internal/writer","reporters","reporters/stenographer","reporters/stenographer/support/go-colorable","reporters/stenographer/support/go-isatty","types"]
+  revision = "9eda700730cba42af70d53180f9dcce9266bc2bc"
+  version = "v1.4.0"
+[[projects]]
+  name = "github.com/onsi/gomega"
+  packages = [".","format","internal/assertion","internal/asyncassertion","internal/oraclematcher","internal/testingtsupport","matchers","matchers/support/goraph/bipartitegraph","matchers/support/goraph/edge","matchers/support/goraph/node","matchers/support/goraph/util","types"]
+  revision = "c893efa28eb45626cdaa76c9f653b62488858837"
+  version = "v1.2.0"
 [[projects]]
   name = "github.com/opencontainers/go-digest"
   packages = ["."]
   revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf"
   version = "v1.0.0-rc1"
+[[projects]]
+  name = "github.com/parnurzeal/gorequest"
+  packages = ["."]
+  revision = "a578a48e8d6ca8b01a3b18314c43c6716bb5f5a3"
+  version = "v0.2.15"
 [[projects]]
   branch = "master"
   name = "github.com/paultag/sniff"
@@ -275,7 +311,7 @@
   branch = "master"
   name = "github.com/prometheus/procfs"
   packages = [".","xfs"]
-  revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2"
+  revision = "a6e9df898b1336106c743392c48ee0b71f5c4efa"
 [[projects]]
   name = "github.com/spf13/pflag"
@@ -293,25 +329,37 @@
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
-  revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
+  revision = "541b9d50ad47e36efd8fb423e938e59ff1691f68"
 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = ["context","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
-  revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
+  packages = ["context","context/ctxhttp","html","html/atom","html/charset","http2","http2/hpack","idna","internal/timeseries","lex/httplex","publicsuffix","trace"]
+  revision = "cd69bc3fc700721b709c3a59e16e24c67b58f6ff"
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/oauth2"
+  packages = [".","google","internal","jws","jwt"]
+  revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
   packages = ["unix","windows"]
-  revision = "ebfc5b4631820b793c9010c87fd8fef0f39eb082"
+  revision = "8dbc5d05d6edcc104950cc299a1ce6641235bc86"
 [[projects]]
   branch = "master"
   name = "golang.org/x/text"
-  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
-  revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25"
+  packages = ["collate","collate/build","encoding","encoding/charmap","encoding/htmlindex","encoding/internal","encoding/internal/identifier","encoding/japanese","encoding/korean","encoding/simplifiedchinese","encoding/traditionalchinese","encoding/unicode","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","internal/utf8internal","language","runes","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
+  revision = "c01e4764d870b77f8abe5096ee19ad20d80e8075"
+[[projects]]
+  name = "google.golang.org/appengine"
+  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
+  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
+  version = "v1.0.0"
 [[projects]]
   branch = "master"
@@ -321,9 +369,9 @@
 [[projects]]
   name = "google.golang.org/grpc"
-  packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
-  revision = "f92cdcd7dcdc69e81b2d7b338479a19a8723cfa3"
-  version = "v1.6.0"
+  packages = [".","balancer","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","stats","status","tap","transport"]
+  revision = "f7bf885db0b7479a537ec317c6e48ce53145f3db"
+  version = "v1.7.0"
 [[projects]]
   name = "gopkg.in/fsnotify.v1"
@@ -350,46 +398,42 @@
   revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
 [[projects]]
-  branch = "master"
   name = "k8s.io/api"
-  packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"]
-  revision = "81aa34336d28aadc3a8e8da7dfd9258c5157e5e4"
+  packages = ["admissionregistration/v1alpha1","apps/v1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"]
+  revision = "409c3b2393cd3359f275bd8b883b4d9c5aec41f6"
 [[projects]]
   branch = "master"
   name = "k8s.io/apiextensions-apiserver"
-  packages = ["pkg/features"]
-  revision = "a5bbfd114a9b122acd741c61d88c84812375d9e1"
+  packages = ["pkg/apis/apiextensions","pkg/apis/apiextensions/v1beta1","pkg/client/clientset/clientset","pkg/client/clientset/clientset/scheme","pkg/client/clientset/clientset/typed/apiextensions/v1beta1","pkg/features"]
+  revision = "83f1a7e2b8384c17fb102b6616e08825ad12992d"
 [[projects]]
-  branch = "master"
   name = "k8s.io/apimachinery"
-  packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/api/validation","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1/validation","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/rand","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
-  revision = "3b05bbfa0a45413bfa184edbf9af617e277962fb"
+  packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/api/validation","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1/validation","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/uuid","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
+  revision = "e9a29eff7d472df0f7da9ead5ab59b74e74a07ec"
 [[projects]]
   branch = "master"
   name = "k8s.io/apiserver"
-  packages = ["pkg/authentication/authenticator","pkg/authentication/serviceaccount","pkg/authentication/user","pkg/features","pkg/server/healthz","pkg/util/feature"]
-  revision = "c1e53d745d0fe45bf7d5d44697e6eface25fceca"
+  packages = ["pkg/authentication/authenticator","pkg/authentication/serviceaccount","pkg/authentication/user","pkg/features","pkg/server/healthz","pkg/util/feature","pkg/util/logs"]
+  revision = "d299c880c4e33854f8c45bdd7ab599fb54cbe575"
 [[projects]]
-  branch = "master"
   name = "k8s.io/client-go"
-  packages = ["discovery","discovery/fake","kubernetes","kubernetes/fake","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1alpha1/fake","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta1/fake","kubernetes/typed/apps/v1beta2","kubernetes/typed/apps/v1beta2/fake","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1/fake","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authentication/v1beta1/fake","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1/fake","kubernetes/typed/authorization/v1beta1","kubernetes/typed/authorization/v1beta1/fake","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v1/fake","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/autoscaling/v2beta1/fake","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1/fake","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v1beta1/fake","kubernetes/typed/batch/v2alpha1","kubernetes/typed/batch/v2alpha1/fake","kubernetes/typed/certificates/v1beta1","kubernetes/typed/certificates/v1beta1/fake","kubernetes/typed/core/v1","kubernetes/typed/core/v1/fake","kubernetes/typed/extensions/v1beta1","kubernetes/typed/extensions/v1beta1/fake","kubernetes/typed/networking/v1","kubernetes/typed/networking/v1/fake","kubernetes/typed/policy/v1beta1","kubernetes/typed/policy/v1beta1/fake","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1/fake","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1alpha1/fake","kubernetes/typed/rbac/v1beta1","kubernetes/typed/rbac/v1beta1/fake","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/scheduling/v1alpha1/fake","kubernetes/typed/settings/v1alpha1","kubernetes/typed/settings/v1alpha1/fake","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1/fake","kubernetes/typed/storage/v1beta1","kubernetes/typed/storage/v1beta1/fake","pkg/version","rest","rest/watch","testing","tools/auth","tools/cache","tools/cache/testing","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/leaderelection","tools/leaderelection/resourcelock","tools/metrics","tools/pager","tools/record","tools/reference","tools/remotecommand","transport","transport/spdy","util/cert","util/cert/triple","util/exec","util/flowcontrol","util/homedir","util/integer","util/retry","util/workqueue"]
-  revision = "82aa063804cf055e16e8911250f888bc216e8b61"
+  packages = ["discovery","discovery/fake","kubernetes","kubernetes/fake","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1alpha1/fake","kubernetes/typed/apps/v1","kubernetes/typed/apps/v1/fake","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta1/fake","kubernetes/typed/apps/v1beta2","kubernetes/typed/apps/v1beta2/fake","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1/fake","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authentication/v1beta1/fake","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1/fake","kubernetes/typed/authorization/v1beta1","kubernetes/typed/authorization/v1beta1/fake","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v1/fake","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/autoscaling/v2beta1/fake","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1/fake","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v1beta1/fake","kubernetes/typed/batch/v2alpha1","kubernetes/typed/batch/v2alpha1/fake","kubernetes/typed/certificates/v1beta1","kubernetes/typed/certificates/v1beta1/fake","kubernetes/typed/core/v1","kubernetes/typed/core/v1/fake","kubernetes/typed/extensions/v1beta1","kubernetes/typed/extensions/v1beta1/fake","kubernetes/typed/networking/v1","kubernetes/typed/networking/v1/fake","kubernetes/typed/policy/v1beta1","kubernetes/typed/policy/v1beta1/fake","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1/fake","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1alpha1/fake","kubernetes/typed/rbac/v1beta1","kubernetes/typed/rbac/v1beta1/fake","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/scheduling/v1alpha1/fake","kubernetes/typed/settings/v1alpha1","kubernetes/typed/settings/v1alpha1/fake","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1/fake","kubernetes/typed/storage/v1beta1","kubernetes/typed/storage/v1beta1/fake","pkg/version","plugin/pkg/client/auth","plugin/pkg/client/auth/azure","plugin/pkg/client/auth/gcp","plugin/pkg/client/auth/oidc","plugin/pkg/client/auth/openstack","rest","rest/watch","testing","third_party/forked/golang/template","tools/auth","tools/cache","tools/cache/testing","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/leaderelection","tools/leaderelection/resourcelock","tools/metrics","tools/pager","tools/record","tools/reference","tools/remotecommand","transport","transport/spdy","util/buffer","util/cert","util/cert/triple","util/exec","util/flowcontrol","util/homedir","util/integer","util/jsonpath","util/retry","util/workqueue"]
+  revision = "076e344c86e52f088b78615f815b245f6d613537"
 [[projects]]
   branch = "master"
   name = "k8s.io/kube-openapi"
   packages = ["pkg/common"]
-  revision = "abfc5fbe1cf87ee697db107fdfd24c32fe4397a8"
+  revision = "89ae48fe8691077463af5b7fb3b6f194632c5946"
 [[projects]]
   name = "k8s.io/kubernetes"
-  packages = ["pkg/api","pkg/api/helper","pkg/api/install","pkg/api/service","pkg/api/util","pkg/api/v1","pkg/api/v1/helper","pkg/api/v1/pod","pkg/api/validation","pkg/apis/extensions","pkg/apis/networking","pkg/capabilities","pkg/cloudprovider","pkg/controller","pkg/features","pkg/kubelet/apis","pkg/kubelet/apis/cri/v1alpha1/runtime","pkg/kubelet/container","pkg/kubelet/types","pkg/kubelet/util/format","pkg/kubelet/util/ioutils","pkg/kubelet/util/sliceutils","pkg/security/apparmor","pkg/serviceaccount","pkg/util/file","pkg/util/hash","pkg/util/io","pkg/util/mount","pkg/util/net/sets","pkg/util/parsers","pkg/util/pointer","pkg/util/sysctl","pkg/util/taints","pkg/volume","pkg/volume/util","third_party/forked/golang/expansion"]
-  revision = "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4"
-  version = "v1.8.0"
+  packages = ["pkg/api","pkg/api/helper","pkg/api/install","pkg/api/service","pkg/api/v1","pkg/api/v1/helper","pkg/api/v1/pod","pkg/api/validation","pkg/apis/extensions","pkg/apis/networking","pkg/capabilities","pkg/cloudprovider","pkg/controller","pkg/features","pkg/kubelet/apis","pkg/kubelet/apis/cri/v1alpha1/runtime","pkg/kubelet/container","pkg/kubelet/types","pkg/kubelet/util/format","pkg/kubelet/util/ioutils","pkg/kubelet/util/sliceutils","pkg/security/apparmor","pkg/serviceaccount","pkg/util/file","pkg/util/hash","pkg/util/io","pkg/util/mount","pkg/util/net/sets","pkg/util/nsenter","pkg/util/parsers","pkg/util/pointer","pkg/util/sysctl","pkg/util/taints","pkg/volume","pkg/volume/util","third_party/forked/golang/expansion"]
+  revision = "88975e98d6f4a84929a243abeb772de16399a4e9"
 [[projects]]
   branch = "master"
@@ -400,6 +444,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "4511b8e7ec7b35d8752f919dfe51b39a07852a98de8d765ce7f2512032450cde"
+  inputs-digest = "e4478c928d7b640014a4b33204ea0684e5ea543b24094a1a4a35a416446beb9b"
   solver-name = "gps-cdcl"
   solver-version = 1


@@ -38,7 +38,7 @@
 [[constraint]]
   name = "github.com/imdario/mergo"
-  version = "0.2.2"
+  version = "0.2.4"
 [[constraint]]
   branch = "master"
@@ -89,22 +89,22 @@
   name = "gopkg.in/go-playground/pool.v3"
   version = "3.1.1"
-[[constraint]]
-  branch = "master"
-  name = "k8s.io/api"
-[[constraint]]
-  branch = "master"
-  name = "k8s.io/apimachinery"
 [[constraint]]
   branch = "master"
   name = "k8s.io/apiserver"
 [[constraint]]
-  branch = "master"
+  name = "k8s.io/api"
+  revision = "409c3b2393cd3359f275bd8b883b4d9c5aec41f6"
+[[constraint]]
+  name = "k8s.io/apimachinery"
+  revision = "e9a29eff7d472df0f7da9ead5ab59b74e74a07ec"
+[[constraint]]
   name = "k8s.io/client-go"
+  revision = "076e344c86e52f088b78615f815b245f6d613537"
 [[constraint]]
   name = "k8s.io/kubernetes"
-  version = "1.8.0"
+  revision = "88975e98d6f4a84929a243abeb772de16399a4e9"


@@ -1,3 +1,17 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 all: push
 BUILDTAGS=
@@ -35,7 +49,7 @@ IMAGE = $(REGISTRY)/$(IMGNAME)
 MULTI_ARCH_IMG = $(IMAGE)-$(ARCH)
 # Set default base image dynamically for each arch
-BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.26
+BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.27
 ifeq ($(ARCH),arm)
 	QEMUARCH=arm
@@ -116,15 +130,23 @@ fmt:
 lint:
 	@echo "+ $@"
-	@go list -f '{{if len .TestGoFiles}}"golint {{.Dir}}/..."{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
+	@go list -f '{{if len .TestGoFiles}}"golint {{.Dir}}/..."{{end}}' $(shell go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e') | xargs -L 1 sh -c
 test: fmt lint vet
 	@echo "+ $@"
-	@go test -v -race -tags "$(BUILDTAGS) cgo" $(shell go list ${PKG}/... | grep -v vendor)
+	@go test -v -race -tags "$(BUILDTAGS) cgo" $(shell go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e')
+e2e-image: sub-container-amd64
+	TAG=$(TAG) IMAGE=$(MULTI_ARCH_IMG) docker tag $(IMAGE):$(TAG) $(IMAGE):test
+	docker images
+e2e-test:
+	@go test -o e2e-tests -c ./test/e2e
+	@KUBECONFIG=${HOME}/.kube/config INGRESSNGINXCONFIG=${HOME}/.kube/config ./e2e-tests
 cover:
 	@echo "+ $@"
-	@go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor) | xargs -L 1 sh -c
+	@go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' $(shell go list ${PKG}/... | grep -v vendor | grep -v '/test/e2e') | xargs -L 1 sh -c
 	gover
 	goveralls -coverprofile=gover.coverprofile -service travis-ci -repotoken ${COVERALLS_TOKEN}
@@ -140,3 +162,7 @@ docker-build: all-container
 .PHONY: docker-push
 docker-push: all-push
+.PHONY: check_dead_links
+check_dead_links:
+	docker run -t -v $$PWD:/tmp rubygem/awesome_bot --allow-dupe --allow-redirect $(shell find $$PWD -name "*.md" -mindepth 1 -printf '%P\n' | grep -v vendor | grep -v Changelog.md)


@@ -74,8 +74,8 @@ This setup requires to choose in which layer (L4 or L7) we want to configure the
 For L4:
 ```console
-kubectl apply -f provider/aws/service-l4.yaml
-kubectl apply -f provider/aws/patch-configmap-l4.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l4.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l4.yaml
 ```
 For L7:
@@ -85,7 +85,7 @@ Then execute:
 ```console
 kubectl apply -f provider/aws/service-l7.yaml
-kubectl apply -f provider/aws/patch-configmap-l7.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l7.yaml
 ```
 This example creates an ELB with just two listeners, one in port 80 and another in port 443
@@ -95,13 +95,13 @@ This example creates an ELB with just two listeners, one in port 80 and another
 If the ingress controller uses RBAC run:
 ```console
-kubectl apply -f provider/patch-service-with-rbac.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
 ```
 If not run:
 ```console
-kubectl apply -f provider/patch-service-without-rbac.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
 ```
 ### GCE - GKE
@@ -114,13 +114,13 @@ curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/pr
 If the ingress controller uses RBAC run:
 ```console
-kubectl apply -f provider/patch-service-with-rbac.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
 ```
 If not run:
 ```console
-kubectl apply -f provider/patch-service-without-rbac.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
 ```
 **Important Note:** proxy protocol is not supported in GCE/GKE
@@ -135,13 +135,13 @@ curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/pr
 If the ingress controller uses RBAC run:
 ```console
-kubectl apply -f provider/patch-service-with-rbac.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
 ```
 If not run:
 ```console
-kubectl apply -f provider/patch-service-without-rbac.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
 ```
 **Important Note:** proxy protocol is not supported in GCE/GKE


@@ -16,4 +16,4 @@ spec:
     targetPort: http
   - name: https
     port: 443
-    targetPort: http
+    targetPort: https


@@ -52,12 +52,20 @@ Key:
 | `base-url-scheme` | Specify the scheme of the `<base>` tags. | | nginx
 | `preserve-host` | Whether to pass the client request host (`true`) or the origin hostname (`false`) in the HTTP Host field. | | trafficserver
+## CORS Related
+| Name | Meaning | Default | Controller
+| --- | --- | --- | --- |
+| `enable-cors` | Enable CORS headers in response. | false | nginx, voyager
+| `cors-allow-origin` | Specifies the Origin allowed in CORS (Access-Control-Allow-Origin) | * | nginx
+| `cors-allow-headers` | Specifies the Headers allowed in CORS (Access-Control-Allow-Headers) | DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization | nginx
+| `cors-allow-methods` | Specifies the Methods allowed in CORS (Access-Control-Allow-Methods) | GET, PUT, POST, DELETE, PATCH, OPTIONS | nginx
+| `cors-allow-credentials` | Specifies the Access-Control-Allow-Credentials | true | nginx
 ## Miscellaneous
 | Name | Meaning | Default | Controller
 | --- | --- | --- | --- |
 | `configuration-snippet` | Arbitrary text to put in the generated configuration file. | | nginx
-| `enable-cors` | Enable CORS headers in response. | | nginx, voyager
 | `limit-connections` | Limit concurrent connections per IP address[1]. | | nginx, voyager
 | `limit-rps` | Limit requests per second per IP address[1]. | | nginx, voyager
 | `limit-rpm` | Limit requests per minute per IP address. | | nginx, voyager
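The CORS keys added above are consumed as per-Ingress annotations; for the nginx controller they carry the `ingress.kubernetes.io/` prefix documented later in this same commit. A minimal sketch (illustrative only; the name, host, and backend service are made up) that enables CORS and relies on the defaults listed in the table:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cors-demo                # hypothetical name
  annotations:
    # Only CORS is enabled here; per the table above, the origin falls back to "*"
    # and the allowed methods/headers fall back to their listed defaults.
    ingress.kubernetes.io/enable-cors: "true"
spec:
  rules:
  - host: demo.example.com       # hypothetical host
    http:
      paths:
      - path: /
        backend:
          serviceName: demo-svc  # hypothetical service
          servicePort: 80
```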

docs/development.md (new file, 126 lines)

@@ -0,0 +1,126 @@
# Getting Started
This document explains how to get started with developing for NGINX Ingress controller.
It includes how to build, test, and release ingress controllers.
## Dependencies
The build uses dependencies in the `vendor` directory, which
must be installed before building a binary/image. Occasionally, you
might need to update the dependencies.
This guide requires you to install the [dep](https://github.com/golang/dep) dependency tool.
Check the version of `dep` you are using and make sure it is up to date.
```console
$ dep version
dep:
version : devel
build date :
git hash :
go version : go1.9
go compiler : gc
platform : linux/amd64
```
If you have an older version of `dep`, you can update it as follows:
```console
$ go get -u github.com/golang/dep
```
This will automatically save the dependencies to the `vendor/` directory.
```console
$ cd $GOPATH/src/ingress-nginx
$ dep ensure
$ dep ensure -update
$ dep prune
```
## Building
All ingress controllers are built through a Makefile. Depending on your
requirements you can build a raw server binary, a local container image,
or push an image to a remote repository.
In order to use your local Docker, you may need to set the following environment variables:
```console
# "gcloud docker" (default) or "docker"
$ export DOCKER=<docker>
# "gcr.io/google_containers" (default), "index.docker.io", or your own registry
$ export REGISTRY=<your-docker-registry>
```
To find the registry simply run: `docker system info | grep Registry`
### Nginx Controller
Build a raw server binary
```console
$ make build
```
[TODO](https://github.com/kubernetes/ingress-nginx/issues/387): add more specific instructions needed for raw server binary.
Build a local container image
```console
$ make docker-build TAG=<tag> PREFIX=$USER/ingress-controller
```
Push the container image to a remote repository
```console
$ make docker-push TAG=<tag> PREFIX=$USER/ingress-controller
```
## Deploying
There are several ways to deploy the ingress controller onto a cluster.
Please check the [deployment guide](../deploy/README.md)
## Testing
To run unit-tests, just run
```console
$ cd $GOPATH/src/k8s.io/ingress-nginx
$ make test
```
If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo.
```console
$ cd $GOPATH/src/k8s.io/ingress-nginx
$ make e2e-test
```
## Releasing
All Makefiles will produce a release binary, as shown above. To publish this
to a wider Kubernetes user base, push the image to a container registry, like
[gcr.io](https://cloud.google.com/container-registry/). All release images are hosted under `gcr.io/google_containers` and
tagged according to a [semver](http://semver.org/) scheme.
An example release might look like:
```
$ make release
```
Please follow these guidelines to cut a release:
* Update the [release](https://help.github.com/articles/creating-releases/)
page with a short description of the major changes that correspond to a given
image tag.
* Cut a release branch, if appropriate. Release branches follow the format of
`controller-release-version`. Typically, pre-releases are cut from HEAD.
All major feature work is done in HEAD. Specific bug fixes are
cherry-picked into a release branch.
* If you're not confident about the stability of the code,
[tag](https://help.github.com/articles/working-with-tags/) it as alpha or beta.
Typically, a release branch should have stable code.


@@ -4,58 +4,17 @@ This directory contains a catalog of examples on how to run, configure and
 scale Ingress. Please review the [prerequisites](PREREQUISITES.md) before
 trying them.
-## Basic cross platform
-Name | Description | Complexity Level
------| ----------- | ----------------
-Deployment | basic deployment of controllers | Beginner
-TLS termination | terminate TLS at the ingress controller | Beginner
-Name based virtual hosting | `Host` header routing | Beginner
-Path routing | URL regex routing | Beginner
-Health checking | configure/optimize health checks | Intermediate
-Pipeline | pipeline cloud and nginx | Advanced
-## AWS
-Name | Description | Complexity Level
------| ----------- | ----------------
-AWS | basic deployment | Intermediate
-## TLS
-Name | Description | Complexity Level
------| ----------- | ----------------
-LetsEncrypt | acquire certs via ACME protocol | Intermediate
-Intermediate certs | terminate TLS with intermediate certs | Advanced
-Client certs | client cert authentication | Advanced
-Re-encrypty | terminate, apply routing rules, re-encrypt | Advanced
 ## Scaling
 Name | Description | Complexity Level
 -----| ----------- | ----------------
-Daemonset | run multiple controllers in a daemonset | Intermediate
-Deployment | run multiple controllers as a deployment | Intermediate
 Static-ip | a single ingress gets a single static ip | Intermediate
-Geo-routing | route to geographically closest endpoint | Advanced
 ## Algorithms
 Name | Description | Complexity Level
 -----| ----------- | ----------------
 Session stickyness | route requests consistently to the same endpoint | Advanced
-Least connections | route requests based on least connections | Advanced
-Weights | route requests to backends based on weights | Advanced
-## Routing
-Name | Description | Complexity Level
------| ----------- | ----------------
-Redirects | send a 301 re-direct | Intermediate
-URL-rewriting | re-write path | Intermediate
-SNI + HTTP | HTTP routing based on SNI hostname | Advanced
-SNI + TCP | TLS routing based on SNI hostname | Advanced
 ## Auth
@@ -64,31 +23,9 @@ Name | Description | Complexity Level
 Basic auth | password protect your website | nginx | Intermediate
 [External auth plugin](external-auth/README.md) | defer to an external auth service | Intermediate
-## Protocols
-Name | Description | Complexity Level
------| ----------- | ----------------
-TCP | TCP loadbalancing | Intermediate
-UDP | UDP loadbalancing | Intermediate
-Websockets | websockets loadbalancing | Intermediate
-HTTP/2 | HTTP/2 loadbalancing | Intermediate
-Proxy protocol | leverage the proxy protocol for source IP | Advanced
-## Custom controllers
-Name | Description | Complexity Level
------| ----------- | ----------------
-Dummy | A simple dummy controller that logs updates | Advanced
 ## Customization
 Name | Description | Complexity Level
 -----| ----------- | ----------------
-custom-headers | set custom headers before send traffic to backends | Advanced
 configuration-snippets | customize nginx location configuration using annotations | Advanced
+custom-headers | set custom headers before send traffic to backends | Advanced
-## RBAC
-Name | Description | Complexity Level
------| ----------- | ----------------
-rbac | Configuring Role Base Access Control | intermediate


@@ -16,7 +16,7 @@ metadata:
 ```
 ```
-curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/customization/cutom-configuration/configmap.yaml \
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-configuration/configmap.yaml \
 | kubectl apply -f -
 ```


@@ -1,4 +1,4 @@
-This example shows how is possible to use a custom backend to render custom error pages. The code of this example is located here [custom-error-pages](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-errors/nginx)
+This example shows how is possible to use a custom backend to render custom error pages. The code of this example is located here [custom-error-pages](https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-errors)
 The idea is to use the headers `X-Code` and `X-Format` that NGINX pass to the backend in case of an error to find out the best existent representation of the response to be returned. i.e. if the request contains an `Accept` header of type `json` the error should be in that format and not in `html` (the default in NGINX).


@@ -5,10 +5,10 @@ use a ConfigMap to configure a custom list of headers to be passed to the upstre
 server
 ```console
-curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/customization/custom-headers/configmap.yaml \
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml \
 | kubectl apply -f -
-curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/customization/custom-headers/custom-headers.yaml \
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml \
 | kubectl apply -f -
 ```


@@ -20,6 +20,10 @@ The following annotations are supported:
 |[ingress.kubernetes.io/configuration-snippet](#configuration-snippet)|string|
 |[ingress.kubernetes.io/default-backend](#default-backend)|string|
 |[ingress.kubernetes.io/enable-cors](#enable-cors)|true or false|
+|[ingress.kubernetes.io/cors-allow-origin](#enable-cors)|string|
+|[ingress.kubernetes.io/cors-allow-methods](#enable-cors)|string|
+|[ingress.kubernetes.io/cors-allow-headers](#enable-cors)|string|
+|[ingress.kubernetes.io/cors-allow-credentials](#enable-cors)|true or false|
 |[ingress.kubernetes.io/force-ssl-redirect](#server-side-https-enforcement-through-redirect)|true or false|
 |[ingress.kubernetes.io/from-to-www-redirect](#redirect-from-to-www)|true or false|
 |[ingress.kubernetes.io/limit-connections](#rate-limiting)|number|
@@ -162,6 +166,26 @@ This is a global configuration for the ingress controller. In some cases could b
 ### Enable CORS
 To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule add the annotation `ingress.kubernetes.io/enable-cors: "true"`. This will add a section in the server location enabling this functionality.
+CORS can be controlled with the following annotations:
+* `ingress.kubernetes.io/cors-allow-methods` controls which methods are accepted. This is a multi-valued field, separated by ',' and accepts only letters (upper and lower case).
+Example: `ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS"`
+* `ingress.kubernetes.io/cors-allow-headers` controls which headers are accepted. This is a multi-valued field, separated by ',' and accepts letters, numbers, _ and -.
+Example: `ingress.kubernetes.io/cors-allow-methods: "X-Forwarded-For, X-app123-XPTO"`
+* `ingress.kubernetes.io/cors-allow-origin` controls what's the accepted Origin for CORS and defaults to '*'. This is a single field value, with the following format: http(s)://origin-site.com or http(s)://origin-site.com:port
+Example: `ingress.kubernetes.io/cors-allow-origin: "https://origin-site.com:4443"`
+* `ingress.kubernetes.io/cors-allow-credentials` controls if credentials can be passed during CORS operations.
+Example: `ingress.kubernetes.io/cors-allow-credentials: "true"`
 For more information please check https://enable-cors.org/server_nginx.html
 ### Server Alias
@@ -264,7 +288,7 @@ By default NGINX uses `http` to reach the services. Adding the annotation `ingre
 ### Service Upstream
-By default the NGINX ingress controller uses a list of all endpoints (Pod IP/port) in the NGINX upstream configuration. This annotation disables that behavior and instead uses a single upstream in NGINX, the service's Cluster IP and port. This can be desirable for things like zero-downtime deployments as it reduces the need to reload NGINX configuration when Pods come up and down. See issue [#257](https://github.com/kubernetes/ingress/issues/257).
+By default the NGINX ingress controller uses a list of all endpoints (Pod IP/port) in the NGINX upstream configuration. This annotation disables that behavior and instead uses a single upstream in NGINX, the service's Cluster IP and port. This can be desirable for things like zero-downtime deployments as it reduces the need to reload NGINX configuration when Pods come up and down. See issue [#257](https://github.com/kubernetes/ingress-nginx/issues/257).
 #### Known Issues
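A minimal sketch of an Ingress that overrides each of the CORS annotations documented in the hunk above; the annotation values reuse this doc's own examples, while the name, host, and backend service are made up:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cors-override               # hypothetical name
  annotations:
    ingress.kubernetes.io/enable-cors: "true"
    ingress.kubernetes.io/cors-allow-origin: "https://origin-site.com:4443"
    ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS"
    ingress.kubernetes.io/cors-allow-headers: "X-Forwarded-For, X-app123-XPTO"
    ingress.kubernetes.io/cors-allow-credentials: "true"
spec:
  rules:
  - host: api.example.com           # hypothetical host
    http:
      paths:
      - path: /
        backend:
          serviceName: api-svc      # hypothetical service
          servicePort: 80
```

Per the parser added in this commit, values that do not match the validation regexes fall back to the defaults (origin `*`, plus the default method and header lists).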


@@ -13,7 +13,7 @@
 # limitations under the License.
 # 0.0.0 shouldn't clobber any released builds
-TAG ?= 0.26
+TAG ?= 0.27
 REGISTRY ?= gcr.io/google_containers
 ARCH ?= $(shell go env GOARCH)
 ALL_ARCH = amd64 arm arm64 ppc64le


@@ -1,5 +1,5 @@
-nginx 1.13.x base image using [ubuntu-slim](https://github.com/kubernetes/ingress/tree/master/images/ubuntu-slim)
+nginx 1.13.x base image using [ubuntu-slim](https://github.com/kubernetes/ingress-nginx/tree/master/images/ubuntu-slim)
 nginx [engine x] is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP proxy server.
@@ -14,7 +14,7 @@ This image does provides a default configuration file with no backend servers.
 *Using docker*
 ```
-$ docker run -v /some/nginx.con:/etc/nginx/nginx.conf:ro gcr.io/google_containers/nginx-slim:0.22
+$ docker run -v /some/nginx.con:/etc/nginx/nginx.conf:ro gcr.io/google_containers/nginx-slim:0.27
 ```
 *Creating a replication controller*


@@ -29,6 +29,6 @@ spec:
     spec:
       containers:
       - name: nginxslim
-        image: gcr.io/google_containers/nginx-slim:0.22
+        image: gcr.io/google_containers/nginx-slim:0.27
         ports:
        - containerPort: 80


@@ -17,25 +17,112 @@ limitations under the License.
 package cors
 import (
+	"regexp"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/ingress-nginx/pkg/ingress/annotations/parser"
 )
 const (
-	annotation = "ingress.kubernetes.io/enable-cors"
+	annotationCorsEnabled = "ingress.kubernetes.io/enable-cors"
+	annotationCorsAllowOrigin = "ingress.kubernetes.io/cors-allow-origin"
+	annotationCorsAllowMethods = "ingress.kubernetes.io/cors-allow-methods"
+	annotationCorsAllowHeaders = "ingress.kubernetes.io/cors-allow-headers"
+	annotationCorsAllowCredentials = "ingress.kubernetes.io/cors-allow-credentials"
+	// Default values
+	defaultCorsMethods = "GET, PUT, POST, DELETE, PATCH, OPTIONS"
+	defaultCorsHeaders = "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization"
 )
+var (
+	// Regex are defined here to prevent information leak, if user tries to set anything not valid
+	// that could cause the Response to contain some internal value/variable (like returning $pid, $upstream_addr, etc)
+	// Origin must contain a http/s Origin (including or not the port) or the value '*'
+	corsOriginRegex = regexp.MustCompile(`^(https?://[A-Za-z0-9\-\.]*(:[0-9]+)?|\*)?$`)
+	// Method must contain valid methods list (PUT, GET, POST, BLA)
+	// May contain or not spaces between each verb
+	corsMethodsRegex = regexp.MustCompile(`^([A-Za-z]+,?\s?)+$`)
+	// Headers must contain valid values only (X-HEADER12, X-ABC)
+	// May contain or not spaces between each Header
+	corsHeadersRegex = regexp.MustCompile(`^([A-Za-z0-9\-\_]+,?\s?)+$`)
+)
 type cors struct {
 }
+// CorsConfig contains the Cors configuration to be used in the Ingress
+type CorsConfig struct {
+	CorsEnabled bool `json:"corsEnabled"`
+	CorsAllowOrigin string `json:"corsAllowOrigin"`
+	CorsAllowMethods string `json:"corsAllowMethods"`
+	CorsAllowHeaders string `json:"corsAllowHeaders"`
+	CorsAllowCredentials bool `json:"corsAllowCredentials"`
+}
 // NewParser creates a new CORS annotation parser
 func NewParser() parser.IngressAnnotation {
 	return cors{}
 }
+// Equal tests for equality between two External types
+func (c1 *CorsConfig) Equal(c2 *CorsConfig) bool {
+	if c1 == c2 {
+		return true
+	}
+	if c1 == nil || c2 == nil {
+		return false
+	}
+	if c1.CorsAllowCredentials != c2.CorsAllowCredentials {
+		return false
+	}
+	if c1.CorsAllowHeaders != c2.CorsAllowHeaders {
+		return false
+	}
+	if c1.CorsAllowOrigin != c2.CorsAllowOrigin {
+		return false
+	}
+	if c1.CorsEnabled != c2.CorsEnabled {
+		return false
+	}
+	return true
+}
 // Parse parses the annotations contained in the ingress
 // rule used to indicate if the location/s should allows CORS
 func (a cors) Parse(ing *extensions.Ingress) (interface{}, error) {
-	return parser.GetBoolAnnotation(annotation, ing)
+	corsenabled, err := parser.GetBoolAnnotation(annotationCorsEnabled, ing)
+	if err != nil {
+		corsenabled = false
+	}
+	corsalloworigin, err := parser.GetStringAnnotation(annotationCorsAllowOrigin, ing)
+	if err != nil || corsalloworigin == "" || !corsOriginRegex.MatchString(corsalloworigin) {
+		corsalloworigin = "*"
+	}
+	corsallowheaders, err := parser.GetStringAnnotation(annotationCorsAllowHeaders, ing)
+	if err != nil || corsallowheaders == "" || !corsHeadersRegex.MatchString(corsallowheaders) {
+		corsallowheaders = defaultCorsHeaders
+	}
+	corsallowmethods, err := parser.GetStringAnnotation(annotationCorsAllowMethods, ing)
+	if err != nil || corsallowmethods == "" || !corsMethodsRegex.MatchString(corsallowmethods) {
+		corsallowmethods = defaultCorsMethods
+	}
+	corsallowcredentials, err := parser.GetBoolAnnotation(annotationCorsAllowCredentials, ing)
+	if err != nil {
+		corsallowcredentials = true
+	}
+	return &CorsConfig{
+		CorsEnabled: corsenabled,
+		CorsAllowOrigin: corsalloworigin,
+		CorsAllowHeaders: corsallowheaders,
+		CorsAllowMethods: corsallowmethods,
+		CorsAllowCredentials: corsallowcredentials,
+	}, nil
 }


@@ -22,42 +22,75 @@ import (
 	api "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 )
-const (
-	notCorsAnnotation = "ingress.kubernetes.io/enable-not-cors"
-)
-func TestParse(t *testing.T) {
-	ap := NewParser()
-	if ap == nil {
-		t.Fatalf("expected a parser.IngressAnnotation but returned nil")
+func buildIngress() *extensions.Ingress {
+	defaultBackend := extensions.IngressBackend{
+		ServiceName: "default-backend",
+		ServicePort: intstr.FromInt(80),
 	}
-	testCases := []struct {
-		annotations map[string]string
-		expected bool
-	}{
-		{map[string]string{annotation: "true"}, true},
-		{map[string]string{annotation: "false"}, false},
-		{map[string]string{notCorsAnnotation: "true"}, false},
-		{map[string]string{}, false},
-		{nil, false},
-	}
-	ing := &extensions.Ingress{
+	return &extensions.Ingress{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: "foo",
 			Namespace: api.NamespaceDefault,
 		},
-		Spec: extensions.IngressSpec{},
-	}
-	for _, testCase := range testCases {
-		ing.SetAnnotations(testCase.annotations)
-		result, _ := ap.Parse(ing)
-		if result != testCase.expected {
-			t.Errorf("expected %t but returned %t, annotations: %s", testCase.expected, result, testCase.annotations)
-		}
+		Spec: extensions.IngressSpec{
+			Backend: &extensions.IngressBackend{
+				ServiceName: "default-backend",
+				ServicePort: intstr.FromInt(80),
+			},
+			Rules: []extensions.IngressRule{
+				{
+					Host: "foo.bar.com",
+					IngressRuleValue: extensions.IngressRuleValue{
+						HTTP: &extensions.HTTPIngressRuleValue{
+							Paths: []extensions.HTTPIngressPath{
+								{
+									Path: "/foo",
+									Backend: defaultBackend,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
 }
+func TestIngressCorsConfig(t *testing.T) {
+	ing := buildIngress()
+	data := map[string]string{}
+	data[annotationCorsEnabled] = "true"
+	data[annotationCorsAllowHeaders] = "DNT,X-CustomHeader, Keep-Alive,User-Agent"
+	data[annotationCorsAllowCredentials] = "false"
+	data[annotationCorsAllowMethods] = "PUT, GET,OPTIONS, PATCH, $nginx_version"
+	data[annotationCorsAllowOrigin] = "https://origin123.test.com:4443"
+	ing.SetAnnotations(data)
+	corst, _ := NewParser().Parse(ing)
+	nginxCors, ok := corst.(*CorsConfig)
+	if !ok {
+		t.Errorf("expected a Config type")
+	}
+	if nginxCors.CorsEnabled != true {
+		t.Errorf("expected cors enabled but returned %v", nginxCors.CorsEnabled)
+	}
+	if nginxCors.CorsAllowHeaders != "DNT,X-CustomHeader, Keep-Alive,User-Agent" {
+		t.Errorf("expected headers not found. Found %v", nginxCors.CorsAllowHeaders)
+	}
+	if nginxCors.CorsAllowMethods != "GET, PUT, POST, DELETE, PATCH, OPTIONS" {
+		t.Errorf("expected default methods, but got %v", nginxCors.CorsAllowMethods)
+	}
+	if nginxCors.CorsAllowOrigin != "https://origin123.test.com:4443" {
+		t.Errorf("expected origin https://origin123.test.com:4443, but got %v", nginxCors.CorsAllowOrigin)
+	}
+}

View file

@ -67,7 +67,7 @@ func newAnnotationExtractor(cfg extractorConfig) annotationExtractor {
"BasicDigestAuth": auth.NewParser(auth.AuthDirectory, cfg), "BasicDigestAuth": auth.NewParser(auth.AuthDirectory, cfg),
"ExternalAuth": authreq.NewParser(), "ExternalAuth": authreq.NewParser(),
"CertificateAuth": authtls.NewParser(cfg), "CertificateAuth": authtls.NewParser(cfg),
"EnableCORS": cors.NewParser(), "CorsConfig": cors.NewParser(),
"HealthCheck": healthcheck.NewParser(cfg), "HealthCheck": healthcheck.NewParser(cfg),
"Whitelist": ipwhitelist.NewParser(cfg), "Whitelist": ipwhitelist.NewParser(cfg),
"UsePortInRedirects": portinredirect.NewParser(cfg), "UsePortInRedirects": portinredirect.NewParser(cfg),
@ -130,6 +130,7 @@ const (
sessionAffinity = "SessionAffinity" sessionAffinity = "SessionAffinity"
serviceUpstream = "ServiceUpstream" serviceUpstream = "ServiceUpstream"
serverAlias = "Alias" serverAlias = "Alias"
corsConfig = "CorsConfig"
clientBodyBufferSize = "ClientBodyBufferSize" clientBodyBufferSize = "ClientBodyBufferSize"
certificateAuth = "CertificateAuth" certificateAuth = "CertificateAuth"
serverSnippet = "ServerSnippet" serverSnippet = "ServerSnippet"
@ -175,6 +176,11 @@ func (e *annotationExtractor) SessionAffinity(ing *extensions.Ingress) *sessiona
return val.(*sessionaffinity.AffinityConfig) return val.(*sessionaffinity.AffinityConfig)
} }
func (e *annotationExtractor) Cors(ing *extensions.Ingress) *cors.CorsConfig {
val, _ := e.annotations[corsConfig].Parse(ing)
return val.(*cors.CorsConfig)
}
func (e *annotationExtractor) CertificateAuth(ing *extensions.Ingress) *authtls.AuthSSLConfig { func (e *annotationExtractor) CertificateAuth(ing *extensions.Ingress) *authtls.AuthSSLConfig {
val, err := e.annotations[certificateAuth].Parse(ing) val, err := e.annotations[certificateAuth].Parse(ing)
if errors.IsMissingAnnotations(err) { if errors.IsMissingAnnotations(err) {

View file

@ -29,15 +29,22 @@ import (
) )
const ( const (
annotationSecureUpstream = "ingress.kubernetes.io/secure-backends" annotationSecureUpstream = "ingress.kubernetes.io/secure-backends"
annotationSecureVerifyCACert = "ingress.kubernetes.io/secure-verify-ca-secret" annotationSecureVerifyCACert = "ingress.kubernetes.io/secure-verify-ca-secret"
annotationUpsMaxFails = "ingress.kubernetes.io/upstream-max-fails" annotationUpsMaxFails = "ingress.kubernetes.io/upstream-max-fails"
annotationUpsFailTimeout = "ingress.kubernetes.io/upstream-fail-timeout" annotationUpsFailTimeout = "ingress.kubernetes.io/upstream-fail-timeout"
annotationPassthrough = "ingress.kubernetes.io/ssl-passthrough" annotationPassthrough = "ingress.kubernetes.io/ssl-passthrough"
annotationAffinityType = "ingress.kubernetes.io/affinity" annotationAffinityType = "ingress.kubernetes.io/affinity"
annotationAffinityCookieName = "ingress.kubernetes.io/session-cookie-name" annotationCorsEnabled = "ingress.kubernetes.io/enable-cors"
annotationAffinityCookieHash = "ingress.kubernetes.io/session-cookie-hash" annotationCorsAllowOrigin = "ingress.kubernetes.io/cors-allow-origin"
annotationUpstreamHashBy = "ingress.kubernetes.io/upstream-hash-by" annotationCorsAllowMethods = "ingress.kubernetes.io/cors-allow-methods"
annotationCorsAllowHeaders = "ingress.kubernetes.io/cors-allow-headers"
annotationCorsAllowCredentials = "ingress.kubernetes.io/cors-allow-credentials"
defaultCorsMethods = "GET, PUT, POST, DELETE, PATCH, OPTIONS"
defaultCorsHeaders = "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization"
annotationAffinityCookieName = "ingress.kubernetes.io/session-cookie-name"
annotationAffinityCookieHash = "ingress.kubernetes.io/session-cookie-hash"
annotationUpstreamHashBy = "ingress.kubernetes.io/upstream-hash-by"
) )
type mockCfg struct { type mockCfg struct {
@ -293,3 +300,54 @@ func TestAffinitySession(t *testing.T) {
} }
} }
} }
func TestCors(t *testing.T) {
ec := newAnnotationExtractor(mockCfg{})
ing := buildIngress()
fooAnns := []struct {
annotations map[string]string
corsenabled bool
methods string
headers string
origin string
credentials bool
}{
{map[string]string{annotationCorsEnabled: "true"}, true, defaultCorsMethods, defaultCorsHeaders, "*", true},
{map[string]string{annotationCorsEnabled: "true", annotationCorsAllowMethods: "POST, GET, OPTIONS", annotationCorsAllowHeaders: "$nginx_version", annotationCorsAllowCredentials: "false"}, true, "POST, GET, OPTIONS", defaultCorsHeaders, "*", false},
{map[string]string{annotationCorsEnabled: "true", annotationCorsAllowCredentials: "false"}, true, defaultCorsMethods, defaultCorsHeaders, "*", false},
{map[string]string{}, false, defaultCorsMethods, defaultCorsHeaders, "*", true},
{nil, false, defaultCorsMethods, defaultCorsHeaders, "*", true},
}
for _, foo := range fooAnns {
ing.SetAnnotations(foo.annotations)
r := ec.Cors(ing)
t.Logf("Testing pass %v %v %v %v %v", foo.corsenabled, foo.methods, foo.headers, foo.origin, foo.credentials)
if r == nil {
t.Errorf("Returned nil but expected a Cors.CorsConfig")
continue
}
if r.CorsEnabled != foo.corsenabled {
t.Errorf("Returned %v but expected %v for Cors Enabled", r.CorsEnabled, foo.corsenabled)
}
if r.CorsAllowHeaders != foo.headers {
t.Errorf("Returned %v but expected %v for Cors Headers", r.CorsAllowHeaders, foo.headers)
}
if r.CorsAllowMethods != foo.methods {
t.Errorf("Returned %v but expected %v for Cors Methods", r.CorsAllowMethods, foo.methods)
}
if r.CorsAllowOrigin != foo.origin {
t.Errorf("Returned %v but expected %v for Cors Methods", r.CorsAllowOrigin, foo.origin)
}
if r.CorsAllowCredentials != foo.credentials {
t.Errorf("Returned %v but expected %v for Cors Methods", r.CorsAllowCredentials, foo.credentials)
}
}
}

View file

@ -23,6 +23,7 @@ import (
"k8s.io/ingress-nginx/pkg/ingress" "k8s.io/ingress-nginx/pkg/ingress"
"k8s.io/ingress-nginx/pkg/ingress/annotations/auth" "k8s.io/ingress-nginx/pkg/ingress/annotations/auth"
"k8s.io/ingress-nginx/pkg/ingress/annotations/authreq" "k8s.io/ingress-nginx/pkg/ingress/annotations/authreq"
"k8s.io/ingress-nginx/pkg/ingress/annotations/cors"
"k8s.io/ingress-nginx/pkg/ingress/annotations/ipwhitelist" "k8s.io/ingress-nginx/pkg/ingress/annotations/ipwhitelist"
"k8s.io/ingress-nginx/pkg/ingress/annotations/proxy" "k8s.io/ingress-nginx/pkg/ingress/annotations/proxy"
"k8s.io/ingress-nginx/pkg/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/pkg/ingress/annotations/ratelimit"
@ -45,7 +46,7 @@ func TestMergeLocationAnnotations(t *testing.T) {
"Backend": "foo_backend", "Backend": "foo_backend",
"BasicDigestAuth": auth.BasicDigest{}, "BasicDigestAuth": auth.BasicDigest{},
DeniedKeyName: &fakeError{}, DeniedKeyName: &fakeError{},
"EnableCORS": true, "CorsConfig": cors.CorsConfig{},
"ExternalAuth": authreq.External{}, "ExternalAuth": authreq.External{},
"RateLimit": ratelimit.RateLimit{}, "RateLimit": ratelimit.RateLimit{},
"Redirect": redirect.Redirect{}, "Redirect": redirect.Redirect{},

View file

@ -24,19 +24,19 @@ import (
) )
func TestEqualConfiguration(t *testing.T) { func TestEqualConfiguration(t *testing.T) {
ap, _ := filepath.Abs("../../tests/manifests/configuration-a.json") ap, _ := filepath.Abs("../../test/manifests/configuration-a.json")
a, err := readJSON(ap) a, err := readJSON(ap)
if err != nil { if err != nil {
t.Errorf("unexpected error reading JSON file: %v", err) t.Errorf("unexpected error reading JSON file: %v", err)
} }
bp, _ := filepath.Abs("../../tests/manifests/configuration-b.json") bp, _ := filepath.Abs("../../test/manifests/configuration-b.json")
b, err := readJSON(bp) b, err := readJSON(bp)
if err != nil { if err != nil {
t.Errorf("unexpected error reading JSON file: %v", err) t.Errorf("unexpected error reading JSON file: %v", err)
} }
cp, _ := filepath.Abs("../../tests/manifests/configuration-c.json") cp, _ := filepath.Abs("../../test/manifests/configuration-c.json")
c, err := readJSON(cp) c, err := readJSON(cp)
if err != nil { if err != nil {
t.Errorf("unexpected error reading JSON file: %v", err) t.Errorf("unexpected error reading JSON file: %v", err)

View file

@ -29,6 +29,7 @@ import (
"k8s.io/ingress-nginx/pkg/ingress/annotations/auth" "k8s.io/ingress-nginx/pkg/ingress/annotations/auth"
"k8s.io/ingress-nginx/pkg/ingress/annotations/authreq" "k8s.io/ingress-nginx/pkg/ingress/annotations/authreq"
"k8s.io/ingress-nginx/pkg/ingress/annotations/authtls" "k8s.io/ingress-nginx/pkg/ingress/annotations/authtls"
"k8s.io/ingress-nginx/pkg/ingress/annotations/cors"
"k8s.io/ingress-nginx/pkg/ingress/annotations/ipwhitelist" "k8s.io/ingress-nginx/pkg/ingress/annotations/ipwhitelist"
"k8s.io/ingress-nginx/pkg/ingress/annotations/proxy" "k8s.io/ingress-nginx/pkg/ingress/annotations/proxy"
"k8s.io/ingress-nginx/pkg/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/pkg/ingress/annotations/ratelimit"
@ -293,9 +294,9 @@ type Location struct {
// Denied returns an error when this location cannot not be allowed // Denied returns an error when this location cannot not be allowed
// Requesting a denied location should return HTTP code 403. // Requesting a denied location should return HTTP code 403.
Denied error `json:"denied,omitempty"` Denied error `json:"denied,omitempty"`
// EnableCORS indicates if path must support CORS // CorsConfig returns the Cors Configuration for the ingress rule
// +optional // +optional
EnableCORS bool `json:"enableCors,omitempty"` CorsConfig cors.CorsConfig `json:"corsConfig,omitempty"`
// ExternalAuth indicates the access to this location requires // ExternalAuth indicates the access to this location requires
// authentication using an external provider // authentication using an external provider
// +optional // +optional

View file

@ -355,7 +355,7 @@ func (l1 *Location) Equal(l2 *Location) bool {
if l1.Denied != l2.Denied { if l1.Denied != l2.Denied {
return false return false
} }
if l1.EnableCORS != l2.EnableCORS { if !(&l1.CorsConfig).Equal(&l2.CorsConfig) {
return false return false
} }
if !(&l1.ExternalAuth).Equal(&l2.ExternalAuth) { if !(&l1.ExternalAuth).Equal(&l2.ExternalAuth) {

58
pkg/k8s/ensure.go Normal file
View file

@ -0,0 +1,58 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import (
api "k8s.io/api/core/v1"
core "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/kubernetes"
)
func EnsureSecret(cl kubernetes.Interface, secret *api.Secret) (*api.Secret, error) {
s, err := cl.CoreV1().Secrets(secret.Namespace).Create(secret)
if err != nil {
if k8sErrors.IsAlreadyExists(err) {
return cl.CoreV1().Secrets(secret.Namespace).Update(secret)
}
return nil, err
}
return s, nil
}
func EnsureIngress(cl kubernetes.Interface, ingress *extensions.Ingress) (*extensions.Ingress, error) {
s, err := cl.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress)
if err != nil {
if k8sErrors.IsNotFound(err) {
return cl.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress)
}
return nil, err
}
return s, nil
}
func EnsureService(cl kubernetes.Interface, service *core.Service) (*core.Service, error) {
s, err := cl.CoreV1().Services(service.Namespace).Update(service)
if err != nil {
if k8sErrors.IsNotFound(err) {
return cl.CoreV1().Services(service.Namespace).Create(service)
}
return nil, err
}
return s, nil
}
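These helpers are idempotent by construction: EnsureSecret tries to create and falls back to update on AlreadyExists, while EnsureIngress and EnsureService try to update and fall back to create on NotFound, so callers can re-apply the same object safely. A usage sketch under those assumptions; the helper name, the "echo" Service, and its selector are hypothetical and not part of this change:

``` go
package helpers // hypothetical package, for illustration only

import (
	core "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"

	"k8s.io/ingress-nginx/pkg/k8s"
)

// deployEcho applies a Service and an Ingress for a hypothetical "echo" app.
// Calling it repeatedly is safe because the Ensure* helpers create or update
// the objects as needed.
func deployEcho(cl kubernetes.Interface, namespace string) error {
	svc := &core.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "echo", Namespace: namespace},
		Spec: core.ServiceSpec{
			Selector: map[string]string{"app": "echo"},
			Ports:    []core.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080)}},
		},
	}
	if _, err := k8s.EnsureService(cl, svc); err != nil {
		return err
	}

	ing := &extensions.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "echo", Namespace: namespace},
		Spec: extensions.IngressSpec{
			Backend: &extensions.IngressBackend{
				ServiceName: "echo",
				ServicePort: intstr.FromInt(80),
			},
		},
	}
	_, err := k8s.EnsureIngress(cl, ing)
	return err
}
```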

View file

@ -57,3 +57,59 @@ func TestNginxHashBucketSize(t *testing.T) {
} }
} }
} }
func TestNextPowerOf2(t *testing.T) {
// Powers of 2
actual := nextPowerOf2(2)
if actual != 2 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 2, actual)
}
actual = nextPowerOf2(4)
if actual != 4 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 4, actual)
}
actual = nextPowerOf2(32)
if actual != 32 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 32, actual)
}
actual = nextPowerOf2(256)
if actual != 256 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 256, actual)
}
// Not Powers of 2
actual = nextPowerOf2(7)
if actual != 8 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 8, actual)
}
actual = nextPowerOf2(9)
if actual != 16 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 16, actual)
}
actual = nextPowerOf2(15)
if actual != 16 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 16, actual)
}
actual = nextPowerOf2(17)
if actual != 32 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 32, actual)
}
actual = nextPowerOf2(250)
if actual != 256 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 256, actual)
}
// Other
actual = nextPowerOf2(0)
if actual != 0 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 0, actual)
}
actual = nextPowerOf2(-1)
if actual != 0 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 0, actual)
}
actual = nextPowerOf2(-2)
if actual != 0 {
t.Errorf("TestNextPowerOf2: expected %d but returned %d.", 0, actual)
}
}
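The expectations above pin down the intended behaviour: inputs that are already powers of two are returned unchanged, other positive inputs are rounded up, and zero or negative inputs yield 0. A minimal sketch that satisfies exactly these cases using the usual bit-smearing trick; this is an illustration consistent with the test, not necessarily the committed implementation, and the package name is hypothetical:

``` go
package nginx // hypothetical package name, for illustration only

// nextPowerOf2Sketch rounds a positive int up to the next power of two and
// maps zero or negative inputs to 0, matching the table in the test above.
func nextPowerOf2Sketch(v int) int {
	if v <= 0 {
		return 0
	}
	// Smear the highest set bit into every lower position, then add one.
	v--
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	v++
	return v
}
```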

View file

@ -163,7 +163,7 @@ func TestBuildAuthResponseHeaders(t *testing.T) {
func TestTemplateWithData(t *testing.T) { func TestTemplateWithData(t *testing.T) {
pwd, _ := os.Getwd() pwd, _ := os.Getwd()
f, err := os.Open(path.Join(pwd, "../../../tests/data/config.json")) f, err := os.Open(path.Join(pwd, "../../../test/data/config.json"))
if err != nil { if err != nil {
t.Errorf("unexpected error reading json file: %v", err) t.Errorf("unexpected error reading json file: %v", err)
} }

View file

@ -515,48 +515,24 @@ stream {
{{/* CORS support from https://michielkalkman.com/snippets/nginx-cors-open-configuration.html */}} {{/* CORS support from https://michielkalkman.com/snippets/nginx-cors-open-configuration.html */}}
{{ define "CORS" }} {{ define "CORS" }}
{{ $cors := .CorsConfig }}
# Cors preflight requests need additional options and a different return code
if ($request_method = 'OPTIONS') { if ($request_method = 'OPTIONS') {
add_header 'Access-Control-Allow-Origin' '*'; add_header 'Access-Control-Allow-Origin' '{{ $cors.CorsAllowOrigin }}';
# {{ if $cors.CorsAllowCredentials }} add_header 'Access-Control-Allow-Credentials' '{{ $cors.CorsAllowCredentials }}'; {{ end }}
# Om nom nom cookies add_header 'Access-Control-Allow-Methods' '{{ $cors.CorsAllowMethods }}';
# add_header 'Access-Control-Allow-Headers' '{{ $cors.CorsAllowHeaders }}';
add_header 'Access-Control-Allow-Credentials' 'true';
add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS';
#
# Custom headers and headers various browsers *should* be OK with but aren't
#
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
#
# Tell client that this pre-flight info is valid for 20 days
#
add_header 'Access-Control-Max-Age' 1728000; add_header 'Access-Control-Max-Age' 1728000;
add_header 'Content-Type' 'text/plain charset=UTF-8'; add_header 'Content-Type' 'text/plain charset=UTF-8';
add_header 'Content-Length' 0; add_header 'Content-Length' 0;
return 204; return 204;
} }
set $cors_method 0;
if ($request_method = 'GET') {
set $cors_method 1;
}
if ($request_method = 'PUT') {
set $cors_method 1;
}
if ($request_method = 'POST') {
set $cors_method 1;
}
if ($request_method = 'DELETE') {
set $cors_method 1;
}
if ($request_method = 'PATCH') {
set $cors_method 1;
}
if ($cors_method = 1) { add_header 'Access-Control-Allow-Origin' '{{ $cors.CorsAllowOrigin }}';
add_header 'Access-Control-Allow-Origin' '*' always; {{ if $cors.CorsAllowCredentials }} add_header 'Access-Control-Allow-Credentials' '{{ $cors.CorsAllowCredentials }}'; {{ end }}
add_header 'Access-Control-Allow-Credentials' 'true'; add_header 'Access-Control-Allow-Methods' '{{ $cors.CorsAllowMethods }}';
add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS'; add_header 'Access-Control-Allow-Headers' '{{ $cors.CorsAllowHeaders }}';
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
}
{{ end }} {{ end }}
{{/* definition of server-template to avoid repetitions with server-alias */}} {{/* definition of server-template to avoid repetitions with server-alias */}}
@ -729,8 +705,8 @@ stream {
proxy_set_header Authorization ""; proxy_set_header Authorization "";
{{ end }} {{ end }}
{{ if $location.EnableCORS }} {{ if $location.CorsConfig.CorsEnabled }}
{{ template "CORS" }} {{ template "CORS" $location }}
{{ end }} {{ end }}
{{ if not (empty $location.Redirect.URL) }} {{ if not (empty $location.Redirect.URL) }}

View file

@ -1,5 +1,19 @@
#!/bin/bash #!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script removes consecutive empty lines in nginx.conf # This script removes consecutive empty lines in nginx.conf
# Using sed is more simple than using a go regex # Using sed is more simple than using a go regex

View file

@ -0,0 +1,110 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultbackend
import (
"crypto/tls"
"net/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/parnurzeal/gorequest"
"k8s.io/ingress-nginx/test/e2e/framework"
)
const defaultBackend = "default backend - 404"
var _ = framework.IngressNginxDescribe("Default backend", func() {
f := framework.NewDefaultFramework("default-backend")
BeforeEach(func() {
})
AfterEach(func() {
})
It("should return 404 sending requests when only a default backend is running", func() {
httpURL, err := f.GetNginxURL(framework.HTTP)
Expect(err).NotTo(HaveOccurred())
httpsURL, err := f.GetNginxURL(framework.HTTPS)
Expect(err).NotTo(HaveOccurred())
request := gorequest.New()
testCases := []struct {
Name string
Host string
Scheme framework.RequestScheme
Method string
Path string
Status int
}{
{"basic HTTP GET request without host to path / should return 404", "", framework.HTTP, "GET", "/", 404},
{"basic HTTP GET request without host to path /demo should return 404", "", framework.HTTP, "GET", "/demo", 404},
{"basic HTTPS GET request without host to path / should return 404", "", framework.HTTPS, "GET", "/", 404},
{"basic HTTPS GET request without host to path /demo should return 404", "", framework.HTTPS, "GET", "/demo", 404},
{"basic HTTP POST request without host to path / should return 404", "", framework.HTTP, "POST", "/", 404},
{"basic HTTP POST request without host to path /demo should return 404", "", framework.HTTP, "POST", "/demo", 404},
{"basic HTTPS POST request without host to path / should return 404", "", framework.HTTPS, "POST", "/", 404},
{"basic HTTPS POST request without host to path /demo should return 404", "", framework.HTTPS, "POST", "/demo", 404},
{"basic HTTP GET request to host foo.bar.com and path / should return 404", " foo.bar.com", framework.HTTP, "GET", "/", 404},
{"basic HTTP GET request to host foo.bar.com and path /demo should return 404", " foo.bar.com", framework.HTTP, "GET", "/demo", 404},
{"basic HTTPS GET request to host foo.bar.com and path / should return 404", " foo.bar.com", framework.HTTPS, "GET", "/", 404},
{"basic HTTPS GET request to host foo.bar.com and path /demo should return 404", " foo.bar.com", framework.HTTPS, "GET", "/demo", 404},
{"basic HTTP POST request to host foo.bar.com and path / should return 404", " foo.bar.com", framework.HTTP, "POST", "/", 404},
{"basic HTTP POST request to host foo.bar.com and path /demo should return 404", " foo.bar.com", framework.HTTP, "POST", "/demo", 404},
{"basic HTTPS POST request to host foo.bar.com and path / should return 404", " foo.bar.com", framework.HTTPS, "POST", "/", 404},
{"basic HTTPS POST request to host foo.bar.com and path /demo should return 404", " foo.bar.com", framework.HTTPS, "POST", "/demo", 404},
}
for _, test := range testCases {
By(test.Name)
var errs []error
var cm *gorequest.SuperAgent
switch test.Scheme {
case framework.HTTP:
cm = request.CustomMethod(test.Method, httpURL)
break
case framework.HTTPS:
cm = request.CustomMethod(test.Method, httpsURL)
// the default backend uses a self generated certificate
cm.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
break
default:
Fail("Unexpected request scheme")
}
if test.Host != "" {
cm.Set("Host", test.Host)
}
resp, _, errs := cm.End()
Expect(len(errs)).Should(BeNumerically("==", 0))
Expect(resp.StatusCode).Should(Equal(test.Status))
}
})
})

View file

@ -0,0 +1,76 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultbackend
import (
"crypto/tls"
"net/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/parnurzeal/gorequest"
"k8s.io/ingress-nginx/test/e2e/framework"
)
var _ = framework.IngressNginxDescribe("Default backend - SSL", func() {
f := framework.NewDefaultFramework("default-backend")
BeforeEach(func() {
})
AfterEach(func() {
})
It("should return a self generated SSL certificate", func() {
httpsURL, err := f.GetNginxURL(framework.HTTPS)
Expect(err).NotTo(HaveOccurred())
request := gorequest.New()
By("checking SSL Certificate using the NGINX IP address")
cm := request.Post(httpsURL)
// the default backend uses a self generated certificate
cm.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
resp, _, errs := cm.End()
Expect(len(errs)).Should(BeNumerically("==", 0))
Expect(len(resp.TLS.PeerCertificates)).Should(BeNumerically("==", 1))
for _, pc := range resp.TLS.PeerCertificates {
Expect(pc.Issuer.CommonName).Should(Equal("Kubernetes Ingress Controller Fake Certificate"))
}
By("checking SSL Certificate using the NGINX catch all server")
cm = request.Post(httpsURL)
// the default backend uses a self generated certificate
cm.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
cm.Set("Host", "foo.bar.com")
resp, _, errs = cm.End()
Expect(len(errs)).Should(BeNumerically("==", 0))
Expect(len(resp.TLS.PeerCertificates)).Should(BeNumerically("==", 1))
for _, pc := range resp.TLS.PeerCertificates {
Expect(pc.Issuer.CommonName).Should(Equal("Kubernetes Ingress Controller Fake Certificate"))
}
})
})

44
test/e2e/e2e.go Normal file
View file

@ -0,0 +1,44 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"testing"
"github.com/golang/glog"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/gomega"
"k8s.io/apiserver/pkg/util/logs"
_ "k8s.io/client-go/plugin/pkg/client/auth"
_ "k8s.io/ingress-nginx/test/e2e/defaultbackend"
"k8s.io/ingress-nginx/test/e2e/framework"
)
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
func RunE2ETests(t *testing.T) {
logs.InitLogs()
defer logs.FlushLogs()
gomega.RegisterFailHandler(ginkgo.Fail)
// Disable skipped tests unless they are explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
}
glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecs(t, "nginx-ingress-controller e2e suite")
}

34
test/e2e/e2e_test.go Normal file
View file

@ -0,0 +1,34 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"testing"
"github.com/golang/glog"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/ingress-nginx/test/e2e/framework"
)
func init() {
framework.RegisterParseFlags()
if "" == framework.TestContext.KubeConfig {
glog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar)
}
}
func TestE2E(t *testing.T) {
RunE2ETests(t)
}

View file

@ -0,0 +1,60 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import "sync"
type CleanupActionHandle *int
var cleanupActionsLock sync.Mutex
var cleanupActions = map[CleanupActionHandle]func(){}
// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
func AddCleanupAction(fn func()) CleanupActionHandle {
p := CleanupActionHandle(new(int))
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
cleanupActions[p] = fn
return p
}
// RemoveCleanupAction removes a function that was installed by AddCleanupAction.
func RemoveCleanupAction(p CleanupActionHandle) {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
delete(cleanupActions, p)
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
// may remove themselves.
func RunCleanupActions() {
list := []func(){}
func() {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
for _, fn := range cleanupActions {
list = append(list, fn)
}
}()
// Run unlocked.
for _, fn := range list {
fn()
}
}

View file

@ -0,0 +1,145 @@
/*
Copyright 2017 Jetstack Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"os/exec"
"strings"
"k8s.io/api/core/v1"
apiextcs "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
podName = "test-ingress-controller"
)
type RequestScheme string
// These are valid test request schemes.
const (
HTTP RequestScheme = "http"
HTTPS RequestScheme = "https"
)
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
type Framework struct {
BaseName string
// A Kubernetes client and an API extensions client
KubeClientSet kubernetes.Interface
APIExtensionsClientSet apiextcs.Interface
// Namespace in which all test resources should reside
Namespace *v1.Namespace
// To make sure that this framework cleans up after itself, no matter what,
// we install a Cleanup action before each test and clear it after. If we
// should abort, the AfterSuite hook should run all Cleanup actions.
cleanupHandle CleanupActionHandle
}
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions).
func NewDefaultFramework(baseName string) *Framework {
f := &Framework{
BaseName: baseName,
}
BeforeEach(f.BeforeEach)
AfterEach(f.AfterEach)
return f
}
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
f.cleanupHandle = AddCleanupAction(f.AfterEach)
By("Creating a kubernetes client")
kubeConfig, err := LoadConfig(TestContext.KubeConfig, TestContext.KubeContext)
Expect(err).NotTo(HaveOccurred())
f.KubeClientSet, err = kubernetes.NewForConfig(kubeConfig)
Expect(err).NotTo(HaveOccurred())
By("Building a namespace api object")
f.Namespace, err = CreateKubeNamespace(f.BaseName, f.KubeClientSet)
Expect(err).NotTo(HaveOccurred())
}
// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
RemoveCleanupAction(f.cleanupHandle)
By("Deleting test namespace")
err := DeleteKubeNamespace(f.KubeClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Waiting for test namespace to no longer exist")
err = WaitForKubeNamespaceNotExist(f.KubeClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
}
// IngressNginxDescribe wrapper function for ginkgo describe. Adds namespacing.
func IngressNginxDescribe(text string, body func()) bool {
return Describe("[nginx-ingress] "+text, body)
}
// GetNginxIP returns the IP address of the minikube cluster
// where the NGINX ingress controller is running
func (f *Framework) GetNginxIP() (string, error) {
out, err := exec.Command("minikube", "ip").Output()
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
}
// GetNginxPort returns the NodePort on which the NGINX service port with the given name is exposed
func (f *Framework) GetNginxPort(name string) (int, error) {
s, err := f.KubeClientSet.CoreV1().Services("ingress-nginx").Get("ingress-nginx", meta_v1.GetOptions{})
if err != nil {
return -1, err
}
for _, p := range s.Spec.Ports {
if p.NodePort != 0 && p.Name == name {
return int(p.NodePort), nil
}
}
return -1, err
}
// GetNginxURL returns the URL that should be used to make a request to NGINX
func (f *Framework) GetNginxURL(scheme RequestScheme) (string, error) {
ip, err := f.GetNginxIP()
if err != nil {
return "", err
}
port, err := f.GetNginxPort(fmt.Sprintf("%v", scheme))
if err != nil {
return "", err
}
return fmt.Sprintf("%v://%v:%v", scheme, ip, port), nil
}

View file

@ -0,0 +1,56 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"flag"
"os"
"github.com/onsi/ginkgo/config"
"k8s.io/client-go/tools/clientcmd"
)
const (
RecommendedConfigPathEnvVar = "INGRESSNGINXCONFIG"
)
type TestContextType struct {
KubeHost string
KubeConfig string
KubeContext string
}
var TestContext TestContextType
// Register flags common to all e2e test suites.
func RegisterCommonFlags() {
// Turn on verbose by default to get spec names
config.DefaultReporterConfig.Verbose = true
// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
config.GinkgoConfig.EmitSpecProgress = true
// Randomize specs as well as suites
config.GinkgoConfig.RandomizeAllSpecs = true
flag.StringVar(&TestContext.KubeHost, "kubernetes-host", "http://127.0.0.1:8080", "The kubernetes host, or apiserver, to connect to")
flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar)
flag.StringVar(&TestContext.KubeContext, "kubernetes-context", "", "config context to use for kuberentes. If unset, will use value from 'current-context'")
}
func RegisterParseFlags() {
RegisterCommonFlags()
flag.Parse()
}

174
test/e2e/framework/util.go Normal file
View file

@ -0,0 +1,174 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
)
const (
// Poll how often to poll for conditions
Poll = 2 * time.Second
// Default time to wait for operations to complete
defaultTimeout = 30 * time.Second
)
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Fail(nowStamp()+": "+msg, 1)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Skip(nowStamp() + ": " + msg)
}
func RestclientConfig(config, context string) (*api.Config, error) {
Logf(">>> config: %s\n", config)
if config == "" {
return nil, fmt.Errorf("Config file must be specified to load client config")
}
c, err := clientcmd.LoadFromFile(config)
if err != nil {
return nil, fmt.Errorf("error loading config: %v", err.Error())
}
if context != "" {
Logf(">>> context: %s\n", context)
c.CurrentContext = context
}
return c, nil
}
type ClientConfigGetter func() (*rest.Config, error)
func LoadConfig(config, context string) (*rest.Config, error) {
c, err := RestclientConfig(config, context)
if err != nil {
return nil, err
}
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{}).ClientConfig()
}
// RunId is a unique identifier of the e2e run
var RunId = uuid.NewUUID()
func CreateKubeNamespace(baseName string, c kubernetes.Interface) (*v1.Namespace, error) {
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
},
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
err := wait.PollImmediate(Poll, defaultTimeout, func() (bool, error) {
var err error
got, err = c.Core().Namespaces().Create(ns)
if err != nil {
Logf("Unexpected error while creating namespace: %v", err)
return false, nil
}
Logf("Created namespace: %v", got.Name)
return true, nil
})
if err != nil {
return nil, err
}
return got, nil
}
func DeleteKubeNamespace(c kubernetes.Interface, namespace string) error {
return c.Core().Namespaces().Delete(namespace, nil)
}
func ExpectNoError(err error, explain ...interface{}) {
if err != nil {
Logf("Unexpected error occurred: %v", err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
func WaitForKubeNamespaceNotExist(c kubernetes.Interface, namespace string) error {
return wait.PollImmediate(Poll, time.Minute*2, namespaceNotExist(c, namespace))
}
func namespaceNotExist(c kubernetes.Interface, namespace string) wait.ConditionFunc {
return func() (bool, error) {
_, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil
}
if err != nil {
return false, err
}
return false, nil
}
}
// WaitForPodRunningInNamespace waits a default amount of time (defaultTimeout) for the specified pod to become running.
// It returns an error if the timeout occurs first, or if the pod goes into a failed state.
func WaitForPodRunningInNamespace(c kubernetes.Interface, pod *v1.Pod) error {
if pod.Status.Phase == v1.PodRunning {
return nil
}
return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, defaultTimeout)
}
func waitTimeoutForPodRunningInNamespace(c kubernetes.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace))
}
func podRunning(c kubernetes.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, fmt.Errorf("pod ran to completion")
}
return false, nil
}
}
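All of the waiters above share one shape: wait.PollImmediate drives a wait.ConditionFunc that reports (done, err), and transient failures return (false, nil) so polling continues. A hypothetical condition in the same style, not part of this commit, written as if it sat alongside util.go in the framework package (so it can reuse the Poll constant):

``` go
package framework // sketch: would sit next to util.go above

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForEndpoints polls until the named Service has at least one ready
// endpoint address, or the timeout expires. Lookup errors are treated as
// "not ready yet" so the poll keeps going.
func waitForEndpoints(c kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
		ep, err := c.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		for _, subset := range ep.Subsets {
			if len(subset.Addresses) > 0 {
				return true, nil
			}
		}
		return false, nil
	})
}
```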

61
test/e2e/up.sh Executable file
View file

@ -0,0 +1,61 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'
echo "downloading kubectl..."
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/amd64/kubectl && \
chmod +x kubectl && sudo mv kubectl /usr/local/bin/
echo "downloading minikube..."
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && \
chmod +x minikube && \
sudo mv minikube /usr/local/bin/
echo "starting minikube..."
sudo minikube start --vm-driver=none --kubernetes-version=$KUBERNETES_VERSION
minikube update-context
echo "waiting for kubernetes cluster"
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True";
do
sleep 1;
done
echo "deploying NGINX Ingress controller"
cat deploy/namespace.yaml | kubectl apply -f -
cat deploy/default-backend.yaml | kubectl apply -f -
cat deploy/configmap.yaml | kubectl apply -f -
cat deploy/tcp-services-configmap.yaml | kubectl apply -f -
cat deploy/udp-services-configmap.yaml | kubectl apply -f -
cat deploy/without-rbac.yaml | kubectl apply -f -
cat deploy/provider/baremetal/service-nodeport.yaml | kubectl apply -f -
echo "updating image..."
kubectl set image \
deployments \
--namespace ingress-nginx \
--selector app=ingress-nginx \
nginx-ingress-controller=gcr.io/google_containers/nginx-ingress-controller:test
sleep 5
echo "waiting NGINX ingress pod..."
until kubectl get pods -n ingress-nginx -l app=ingress-nginx -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True";
do
sleep 1;
done

16
vendor/cloud.google.com/go/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,16 @@
sudo: false
language: go
go:
- 1.6
- 1.7
- 1.8
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
./run-tests.sh $TRAVIS_COMMIT
env:
matrix:
# The GCLOUD_TESTS_API_KEY environment variable.
secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI=

15
vendor/cloud.google.com/go/AUTHORS generated vendored Normal file
View file

@ -0,0 +1,15 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

144
vendor/cloud.google.com/go/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,144 @@
# Contributing
1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. You will need to ensure that your `GOBIN` directory (by default
`$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. If you would like, you may want to set up aliases for git-codereview,
such that `git codereview change` becomes `git change`. See the
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
1. Should you run into issues with the git-codereview tool, please note
that all error messages will assume that you have set up these
aliases.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git
origin is https://code.googlesource.com/gocloud:
git remote set-url origin https://code.googlesource.com/gocloud
1. Make sure your auth is configured correctly by visiting
https://code.googlesource.com, clicking "Generate Password", and following
the directions.
1. Make changes and create a change by running `git codereview change <name>`,
provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending to the change with `git codereview change` and mail as your receive
feedback. Each new mailed amendment will create a new patch set for your change in Gerrit.
## Integration Tests
In addition to the unit tests, you may run the integration test suite.
To run the integrations tests, creating and configuration of a project in the
Google Developers Console is required.
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
(or **Editor** and **Logs Configuration Writer** roles) are added to the
service account.
Once you create a project, set the following environment variables to be able to
run the against the actual APIs.
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
- **GCLOUD_TESTS_API_KEY**: Your API key.
Firestore requires a different project and key:
- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
supporting Firestore
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
Install the [gcloud command-line tool][gcloudcli] to your machine and use it
to create some resources used in integration tests.
From the project's root directory:
``` sh
# Set the default project in your env.
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
# Authenticate the gcloud tool with your account.
$ gcloud auth login
# Create the indexes used in the datastore integration tests.
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
# Create a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink
# integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
# Create a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
# the instance after testing with 'gcloud beta spanner instances delete'.
```
Once you've set the environment variables, you can run the integration tests by
running:
``` sh
$ go test -v cloud.google.com/go/...
```
## Contributor License Agreements
Before we can accept your pull requests you'll need to sign a Contributor
License Agreement (CLA):
- **If you are an individual writing original source code** and **you own the
- intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your work**,
then you'll need to sign a [corporate CLA][corpcla].
You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests.
## Contributor Code of Conduct
As contributors and maintainers of this project,
and in the interest of fostering an open and welcoming community,
we pledge to respect all people who contribute through reporting issues,
posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.
We are committed to making participation in this project
a harassment-free experience for everyone,
regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information,
such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct.
By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently
applying these principles to every aspect of managing this project.
Project maintainers who do not follow or enforce the Code of Conduct
may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
[indvcla]: https://developers.google.com/open-source/cla/individual
[corpcla]: https://developers.google.com/open-source/cla/corporate

37
vendor/cloud.google.com/go/CONTRIBUTORS generated vendored Normal file
View file

@ -0,0 +1,37 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
# Keep the list alphabetically sorted.
Alexis Hunt <lexer@google.com>
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Thanatat Tamtan <acoshift@gmail.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

202
vendor/cloud.google.com/go/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
54
vendor/cloud.google.com/go/MIGRATION.md generated vendored Normal file
View file
@ -0,0 +1,54 @@
# Code Changes
## v0.10.0
- pubsub: Replace
```
sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
```
with
```
sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
})
```
- trace: trace.GRPCServerInterceptor will be provided from *trace.Client.
Given an initialized `*trace.Client` named `tc`, instead of
```
s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
```
write
```
s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
```
- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
Instead of
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
```
write
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
```
- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
interceptor as a dial option as shown below when initializing Cloud package
clients:
```
c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
if err != nil {
...
}
```
514
vendor/cloud.google.com/go/README.md generated vendored Normal file
View file
@ -0,0 +1,514 @@
# Google Cloud Client Libraries for Go
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
``` go
import "cloud.google.com/go"
```
To install the packages on your system,
```
$ go get -u cloud.google.com/go/...
```
**NOTE:** Some of these packages are under development, and may occasionally
make backwards-incompatible changes.
**NOTE:** This GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
* [News](#news)
* [Supported APIs](#supported-apis)
* [Go Versions Supported](#go-versions-supported)
* [Authorization](#authorization)
* [Cloud Datastore](#cloud-datastore-)
* [Cloud Storage](#cloud-storage-)
* [Cloud Pub/Sub](#cloud-pub-sub-)
* [Cloud BigQuery](#cloud-bigquery-)
* [Stackdriver Logging](#stackdriver-logging-)
* [Cloud Spanner](#cloud-spanner-)
## News
*v0.15.0*
_October 3, 2017_
- firestore: beta release. See the
[announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).
- errorreporting: The existing package has been redesigned.
- errors: This package has been removed. Use errorreporting.
_September 28, 2017_
*v0.14.0*
- bigquery BREAKING CHANGES:
- Standard SQL is the default for queries and views.
- `Table.Create` takes `TableMetadata` as a second argument, instead of
options.
- `Dataset.Create` takes `DatasetMetadata` as a second argument.
- `DatasetMetadata` field `ID` renamed to `FullID`
- `TableMetadata` field `ID` renamed to `FullID`
- Other bigquery changes:
- The client will append a random suffix to a provided job ID if you set
`AddJobIDSuffix` to true in a job config.
- Listing jobs is supported.
- Better retry logic.
- vision, language, speech: clients are now stable
- monitoring: client is now beta
- profiler:
- Rename InstanceName to Instance, ZoneName to Zone
- Auto-detect service name and version on AppEngine.
_September 8, 2017_
*v0.13.0*
- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
options to continue using Legacy SQL after the client switches its default
to Standard SQL.
- bigquery: Support for updating dataset labels.
- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
than the client's. DatasetsInProject is no longer needed and is deprecated.
- bigtable: Fail ListInstances when any zones fail.
- spanner: support decoding of slices of basic types (e.g. []string, []int64,
etc.)
- logging/logadmin: UpdateSink no longer creates a sink if it is missing
(actually a change to the underlying service, not the client)
- profiler: Service and ServiceVersion replace Target in Config.
_August 22, 2017_
*v0.12.0*
- pubsub: Subscription.Receive now uses streaming pull.
- pubsub: add Client.TopicInProject to access topics in a different project
than the client.
- errors: renamed errorreporting. The errors package will be removed shortly.
- datastore: improved retry behavior.
- bigquery: support updates to dataset metadata, with etags.
- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
- bigquery: generate all job IDs on the client.
- storage: support bucket lifecycle configurations.
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
## Supported APIs
Google API | Status | Package
---------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
> **Alpha status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
> for production use.
>
> **Beta status**: the API is largely complete, but still has outstanding
> features and bugs to be addressed. There may be minor backwards-incompatible
> changes where necessary.
>
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.
Documentation and examples are available at
https://godoc.org/cloud.google.com/go
Visit or join the
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
for updates on these packages.
## Go Versions Supported
We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well. You can see which versions are
currently supported by looking at the lines following `go:` in
[`.travis.yml`](.travis.yml).
## Authorization
By default, each API will use [Google Application Default Credentials][default-creds]
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.
[snip]:# (auth)
```go
client, err := storage.NewClient(ctx)
```
To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
to the `NewClient` function of the desired package. For example:
[snip]:# (auth-JSON)
```go
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
```
You can exert more control over authorization by using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. Then pass
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
to the `NewClient` function:
[snip]:# (auth-ts)
```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
```
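For illustration only (this sketch is not part of the upstream README): one way to construct such a token source, assuming you already have a raw OAuth2 access token from some other flow, is `oauth2.StaticTokenSource`; in most environments Application Default Credentials remain the simpler option.
```go
// Hedged sketch: build a TokenSource from an existing access token and pass
// it to a client constructor. The access token is assumed to come from your
// own auth flow; this is not the recommended default (see ADC above).
package main

import (
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	accessToken := "ya29.example" // assumption: obtained elsewhere
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: accessToken})

	client, err := storage.NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	_ = client // Use the client.
}
```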
## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)
- [About Cloud Datastore][cloud-datastore]
- [Activating the API for your project][cloud-datastore-activation]
- [API documentation][cloud-datastore-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)
### Example Usage
First create a `datastore.Client` to use throughout your application:
[snip]:# (datastore-1)
```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
log.Fatal(err)
}
```
Then use that client to interact with the API:
[snip]:# (datastore-2)
```go
type Post struct {
Title string
Body string `datastore:",noindex"`
PublishedAt time.Time
}
keys := []*datastore.Key{
datastore.NameKey("Post", "post1", nil),
datastore.NameKey("Post", "post2", nil),
}
posts := []*Post{
{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
log.Fatal(err)
}
```
## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
- [About Cloud Storage][cloud-storage]
- [API documentation][cloud-storage-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
### Example Usage
First create a `storage.Client` to use throughout your application:
[snip]:# (storage-1)
```go
client, err := storage.NewClient(ctx)
if err != nil {
log.Fatal(err)
}
```
[snip]:# (storage-2)
```go
// Read object1 from the bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
log.Fatal(err)
}
```
## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
- [About Cloud Pubsub][cloud-pubsub]
- [API documentation][cloud-pubsub-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)
### Example Usage
First create a `pubsub.Client` to use throughout your application:
[snip]:# (pubsub-1)
```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
```
Then use the client to publish and subscribe:
[snip]:# (pubsub-2)
```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
res := topic.Publish(ctx, &pubsub.Message{
Data: []byte("hello world"),
})
// The publish happens asynchronously.
// Later, you can get the result from res:
...
msgID, err := res.Get(ctx)
if err != nil {
log.Fatal(err)
}
// Use a callback to receive messages via subscription1.
sub := client.Subscription("subscription1")
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
fmt.Println(m.Data)
m.Ack() // Acknowledge that we've consumed the message.
})
if err != nil {
log.Println(err)
}
```
## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
- [About Cloud BigQuery][cloud-bigquery]
- [API documentation][cloud-bigquery-docs]
- [Go client documentation][cloud-bigquery-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)
### Example Usage
First create a `bigquery.Client` to use throughout your application:
[snip]:# (bq-1)
```go
c, err := bigquery.NewClient(ctx, "my-project-ID")
if err != nil {
// TODO: Handle error.
}
```
Then use that client to interact with the API:
[snip]:# (bq-2)
```go
// Construct a query.
q := c.Query(`
SELECT year, SUM(number)
FROM [bigquery-public-data:usa_names.usa_1910_2013]
WHERE name = "William"
GROUP BY year
ORDER BY year
`)
// Execute the query.
it, err := q.Read(ctx)
if err != nil {
// TODO: Handle error.
}
// Iterate through the results.
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(values)
}
```
## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)
- [About Stackdriver Logging][cloud-logging]
- [API documentation][cloud-logging-docs]
- [Go client documentation][cloud-logging-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
### Example Usage
First create a `logging.Client` to use throughout your application:
[snip]:# (logging-1)
```go
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
if err != nil {
// TODO: Handle error.
}
```
Usually, you'll want to add log entries to a buffer to be periodically flushed
(automatically and asynchronously) to the Stackdriver Logging service.
[snip]:# (logging-2)
```go
logger := client.Logger("my-log")
logger.Log(logging.Entry{Payload: "something happened!"})
```
Close your client before your program exits, to flush any buffered log entries.
[snip]:# (logging-3)
```go
err = client.Close()
if err != nil {
// TODO: Handle error.
}
```
## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner)
- [About Cloud Spanner][cloud-spanner]
- [API documentation][cloud-spanner-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)
### Example Usage
First create a `spanner.Client` to use throughout your application:
[snip]:# (spanner-1)
```go
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
if err != nil {
log.Fatal(err)
}
```
[snip]:# (spanner-2)
```go
// Simple Reads And Writes
_, err = client.Apply(ctx, []*spanner.Mutation{
spanner.Insert("Users",
[]string{"name", "email"},
[]interface{}{"alice", "a@example.com"})})
if err != nil {
log.Fatal(err)
}
row, err := client.Single().ReadRow(ctx, "Users",
spanner.Key{"alice"}, []string{"email"})
if err != nil {
log.Fatal(err)
}
```
## Contributing
Contributions are welcome. Please, see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo, new pull requests will be automatically closed.
Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.
[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
[cloud-firestore]: https://cloud.google.com/firestore/
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate
[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
[cloud-monitoring]: https://cloud.google.com/monitoring/
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
[cloud-vision]: https://cloud.google.com/vision
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs
[cloud-translation]: https://cloud.google.com/translation
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace
[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
32
vendor/cloud.google.com/go/appveyor.yml generated vendored Normal file
View file
@ -0,0 +1,32 @@
# This file configures AppVeyor (http://www.appveyor.com),
# a Windows-based CI service similar to Travis.
# Identifier for this run
version: "{build}"
# Clone the repo into this path, which conforms to the standard
# Go workspace structure.
clone_folder: c:\gopath\src\cloud.google.com\go
environment:
GOPATH: c:\gopath
GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762
GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json
KEYFILE_CONTENTS:
secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje
install:
# Info for debugging.
- echo %PATH%
- go version
- go env
- go get -v -d -t ./...
# Provide a build script, or AppVeyor will call msbuild.
build_script:
- go install -v ./...
- echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY%
test_script:
- go test -v ./...
49
vendor/cloud.google.com/go/authexample_test.go generated vendored Normal file
View file
@ -0,0 +1,49 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloud_test
import (
"cloud.google.com/go/datastore"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
func Example_applicationDefaultCredentials() {
// Google Application Default Credentials is the recommended way to authorize
// and authenticate clients.
//
// See the following link on how to create and obtain Application Default Credentials:
// https://developers.google.com/identity/protocols/application-default-credentials.
client, err := datastore.NewClient(context.Background(), "project-id")
if err != nil {
// TODO: handle error.
}
_ = client // Use the client.
}
func Example_serviceAccountFile() {
// Use a JSON key file associated with a Google service account to
// authenticate and authorize. Service Account keys can be created and
// downloaded from https://console.developers.google.com/permissions/serviceaccounts.
//
// Note: This example uses the datastore client, but the same steps apply to
// the other client libraries underneath this package.
client, err := datastore.NewClient(context.Background(),
"project-id", option.WithServiceAccountFile("/path/to/service-account-key.json"))
if err != nil {
// TODO: handle error.
}
_ = client // Use the client.
}
20
vendor/cloud.google.com/go/cloud.go generated vendored Normal file
View file
@ -0,0 +1,20 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cloud is the root of the packages used to access Google Cloud
// Services. See https://godoc.org/cloud.google.com/go for a full list
// of sub-packages.
//
// This package documents how to authorize and authenticate the sub packages.
package cloud // import "cloud.google.com/go"
437
vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored Normal file
View file
@ -0,0 +1,437 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
metaClient = &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}
subscribeClient = &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
} else {
v, err = Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, metaClient, req)
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
48
vendor/cloud.google.com/go/compute/metadata/metadata_test.go generated vendored Normal file
View file
@ -0,0 +1,48 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import (
"os"
"sync"
"testing"
)
func TestOnGCE_Stress(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
var last bool
for i := 0; i < 100; i++ {
onGCEOnce = sync.Once{}
now := OnGCE()
if i > 0 && now != last {
t.Errorf("%d. changed from %v to %v", i, last, now)
}
last = now
}
t.Logf("OnGCE() = %v", last)
}
func TestOnGCE_Force(t *testing.T) {
onGCEOnce = sync.Once{}
old := os.Getenv(metadataHostEnv)
defer os.Setenv(metadataHostEnv, old)
os.Setenv(metadataHostEnv, "127.0.0.1")
if !OnGCE() {
t.Error("OnGCE() = false; want true")
}
}
BIN
vendor/cloud.google.com/go/key.json.enc generated vendored Normal file
Binary file not shown.
70
vendor/cloud.google.com/go/license_test.go generated vendored Normal file
View file
@ -0,0 +1,70 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloud
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
)
var sentinels = []string{
"Copyright",
"Google Inc",
`Licensed under the Apache License, Version 2.0 (the "License");`,
}
func TestLicense(t *testing.T) {
err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if ext := filepath.Ext(path); ext != ".go" && ext != ".proto" {
return nil
}
if strings.HasSuffix(path, ".pb.go") {
// .pb.go files are generated from the proto files.
// .proto files must have license headers.
return nil
}
if path == "bigtable/cmd/cbt/cbtdoc.go" {
// Automatically generated.
return nil
}
src, err := ioutil.ReadFile(path)
if err != nil {
return nil
}
src = src[:140] // Ensure all of the sentinel values are at the top of the file.
// Find license
for _, sentinel := range sentinels {
if !bytes.Contains(src, []byte(sentinel)) {
t.Errorf("%v: license header not present. want %q", path, sentinel)
return nil
}
}
return nil
})
if err != nil {
t.Fatal(err)
}
486
vendor/cloud.google.com/go/old-news.md generated vendored Normal file
View file

@ -0,0 +1,486 @@
_July 31, 2017_
*v0.11.0*
- Clients for spanner, pubsub and video are now in beta.
- New client for DLP.
- spanner: performance and testing improvements.
- storage: requester-pays buckets are supported.
- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
- pubsub: bug fixes and other minor improvements
_June 17, 2017_
*v0.10.0*
- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
- pubsub: Subscription.Receive now runs concurrently for higher throughput.
- vision: cloud.google.com/go/vision is deprecated. Use
cloud.google.com/go/vision/apiv1 instead.
- translation: now stable.
- trace: several changes to the surface. See the link below.
[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
_March 17, 2017_
Breaking Pubsub changes.
* Publish is now asynchronous
([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
* Message.Done replaced with Message.Ack and Message.Nack.
_February 14, 2017_
Release of a client library for Spanner. See
the
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
Note that although the Spanner service is beta, the Go client library is alpha.
_December 12, 2016_
Beta release of BigQuery, DataStore, Logging and Storage. See the
[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
Also, BigQuery now supports structs. Read a row directly into a struct with
`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
You can also use field tags. See the [package documentation][cloud-bigquery-ref]
for details.
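A minimal sketch of that struct support (not part of the original announcement; `Score` is a hypothetical type, and the snippet assumes `ctx`, a `table` with a matching schema, and the `fmt`/`iterator` imports used in the other examples here):
```go
// Hypothetical row type; the destination table's schema is assumed to match.
type Score struct {
	Name  string `bigquery:"name"`
	Value int64  `bigquery:"value"`
}

// Upload a row directly from a struct.
u := table.Uploader()
if err := u.Put(ctx, []*Score{{Name: "alice", Value: 42}}); err != nil {
	// TODO: Handle error.
}

// Read rows directly into the struct.
it := table.Read(ctx)
for {
	var s Score
	err := it.Next(&s)
	if err == iterator.Done {
		break
	}
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(s)
}
```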
_December 5, 2016_
More changes to BigQuery:
* The `ValueList` type was removed. It is no longer necessary. Instead of
```go
var v ValueList
... it.Next(&v) ..
```
use
```go
var v []Value
... it.Next(&v) ...
```
* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
`ValueList` would append to the slice. Now each call resets the size to zero first.
* Schema inference will infer the SQL type BYTES for a struct field of
type []byte. Previously it inferred STRING.
* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
inference. BigQuery's integer type is INT64, and those types may hold values
that are not correctly represented in a 64-bit signed integer.
* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
package (see the sketch just after this list).
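A hedged sketch of the new date/time support (not from the original notes; `Visit` is a hypothetical type, and the example assumes schema inference maps the civil types as the bullet above describes):
```go
// A struct using the new civil types. Day and At should be inferred as the
// DATE and TIME SQL types respectively.
type Visit struct {
	Page string
	Day  civil.Date
	At   civil.Time
}

schema, err := bigquery.InferSchema(Visit{})
if err != nil {
	// TODO: Handle error.
}
_ = schema // Use the inferred schema when creating or loading the table.
```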
_November 17, 2016_
Change to BigQuery: values from INTEGER columns will now be returned as int64,
not int. This will avoid errors arising from large values on 32-bit systems.
_November 8, 2016_
New datastore feature: datastore now encodes your nested Go structs as Entity values,
instead of a flattened list of the embedded struct's fields.
This means that you may now have twice-nested slices, e.g.
```go
type State struct {
Cities []struct{
Populations []int
}
}
```
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
more details.
_November 8, 2016_
Breaking changes to datastore: contexts no longer hold namespaces; instead you
must set a key's namespace explicitly. Also, key functions have been changed
and renamed.
* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
```go
q := datastore.NewQuery("Kind").Namespace("ns")
```
* All the fields of Key are exported. That means you can construct any Key with a struct literal:
```go
k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
```
* As a result of the above, the Key methods Kind, ID, Name, Parent, SetParent and Namespace have been removed.
* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
```go
NewIncompleteKey(ctx, kind, parent)
```
with
```go
IncompleteKey(kind, parent)
```
and if you do use namespaces, make sure you set the namespace on the returned key.
* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
```go
NewKey(ctx, kind, name, 0, parent)
NewKey(ctx, kind, "", id, parent)
```
with
```go
NameKey(kind, name, parent)
IDKey(kind, id, parent)
```
and if you do use namespaces, make sure you set the namespace on the returned key.
* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
more details.
_October 27, 2016_
Breaking change to bigquery: `NewGCSReference` is now a function,
not a method on `Client`.
New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
loading data into a table from a file or any `io.Reader`.
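A rough sketch of the `ReaderSource` path (not part of the original note; it assumes `ctx`, a `client`, and a local `data.csv` whose columns match the destination table):
```go
f, err := os.Open("data.csv") // hypothetical local file
if err != nil {
	// TODO: Handle error.
}
defer f.Close()

rs := bigquery.NewReaderSource(f)
loader := client.Dataset("dataset").Table("table").LoaderFrom(rs)
job, err := loader.Run(ctx)
if err != nil {
	// TODO: Handle error.
}
_ = job // Optionally wait on the job and check its final status.
```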
_October 21, 2016_
Breaking change to pubsub: removed `pubsub.Done`.
Use `iterator.Done` instead, where `iterator` is the package
`google.golang.org/api/iterator`.
_October 19, 2016_
Breaking changes to cloud.google.com/go/bigquery:
* Client.Table and Client.OpenTable have been removed.
Replace
```go
client.OpenTable("project", "dataset", "table")
```
with
```go
client.DatasetInProject("project", "dataset").Table("table")
```
* Client.CreateTable has been removed.
Replace
```go
client.CreateTable(ctx, "project", "dataset", "table")
```
with
```go
client.DatasetInProject("project", "dataset").Table("table").Create(ctx)
```
* Dataset.ListTables has been replaced with Dataset.Tables.
Replace
```go
tables, err := ds.ListTables(ctx)
```
with
```go
it := ds.Tables(ctx)
for {
table, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: use table.
}
```
* Client.Read has been replaced with Job.Read, Table.Read and Query.Read.
Replace
```go
it, err := client.Read(ctx, job)
```
with
```go
it, err := job.Read(ctx)
```
and similarly for reading from tables or queries.
* The iterator returned from the Read methods is now named RowIterator. Its
behavior is closer to the other iterators in these libraries. It no longer
supports the Schema method; see the next item.
Replace
```go
for it.Next(ctx) {
var vals ValueList
if err := it.Get(&vals); err != nil {
// TODO: Handle error.
}
// TODO: use vals.
}
if err := it.Err(); err != nil {
// TODO: Handle error.
}
```
with
```
for {
var vals ValueList
err := it.Next(&vals)
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: use vals.
}
```
Instead of the `RecordsPerRequest(n)` option, write
```go
it.PageInfo().MaxSize = n
```
Instead of the `StartIndex(i)` option, write
```go
it.StartIndex = i
```
* ValueLoader.Load now takes a Schema in addition to a slice of Values.
Replace
```go
func (vl *myValueLoader) Load(v []bigquery.Value)
```
with
```go
func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
```
* Table.Patch is replaced by Table.Update.
Replace
```go
p := table.Patch()
p.Description("new description")
metadata, err := p.Apply(ctx)
```
with
```go
metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
Description: "new description",
})
```
* Client.Copy is replaced by separate methods for each of its four functions.
All options have been replaced by struct fields.
* To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
Replace
```go
client.Copy(ctx, table, gcsRef)
```
with
```go
table.LoaderFrom(gcsRef).Run(ctx)
```
Instead of passing options to Copy, set fields on the Loader:
```go
loader := table.LoaderFrom(gcsRef)
loader.WriteDisposition = bigquery.WriteTruncate
```
* To extract data from a table into Google Cloud Storage, use
Table.ExtractorTo. Set fields on the returned Extractor instead of
passing options.
Replace
```go
client.Copy(ctx, gcsRef, table)
```
with
```go
table.ExtractorTo(gcsRef).Run(ctx)
```
* To copy data into a table from one or more other tables, use
Table.CopierFrom. Set fields on the returned Copier instead of passing options.
Replace
```go
client.Copy(ctx, dstTable, srcTable)
```
with
```go
dstTable.CopierFrom(srcTable).Run(ctx)
```
* To start a query job, create a Query and call its Run method. Set fields
on the query instead of passing options.
Replace
```go
client.Copy(ctx, table, query)
```
with
```go
query.Run(ctx)
```
* Table.NewUploader has been renamed to Table.Uploader. Instead of options,
configure an Uploader by setting its fields.
Replace
```go
u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
```
with
```go
u := table.Uploader()
u.IgnoreUnknownValues = true
```
_October 10, 2016_
Breaking changes to cloud.google.com/go/storage:
* AdminClient replaced by methods on Client.
Replace
```go
adminClient.CreateBucket(ctx, bucketName, attrs)
```
with
```go
client.Bucket(bucketName).Create(ctx, projectID, attrs)
```
* BucketHandle.List replaced by BucketHandle.Objects.
Replace
```go
for query != nil {
objs, err := bucket.List(d.ctx, query)
if err != nil { ... }
query = objs.Next
for _, obj := range objs.Results {
fmt.Println(obj)
}
}
```
with
```go
iter := bucket.Objects(d.ctx, query)
for {
obj, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil { ... }
fmt.Println(obj)
}
```
(The `iterator` package is at `google.golang.org/api/iterator`.)
Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
* ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
Replace
```go
attrs, err := src.CopyTo(ctx, dst, nil)
```
with
```go
attrs, err := dst.CopierFrom(src).Run(ctx)
```
Replace
```go
attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContentType: "text/html"})
```
with
```go
c := dst.CopierFrom(src)
c.ContentType = "text/html"
attrs, err := c.Run(ctx)
```
* ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
Replace
```go
attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
```
with
```go
attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
```
* ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
Replace
```go
attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContentType: "text/html"})
```
with
```go
attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/html"})
```
* ObjectHandle.WithConditions replaced by ObjectHandle.If.
Replace
```go
obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
```
with
```go
obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
```
Replace
```go
obj.WithConditions(storage.IfGenerationMatch(0))
```
with
```go
obj.If(storage.Conditions{DoesNotExist: true})
```
* `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
_October 6, 2016_
Package preview/logging deleted. Use logging instead.
_September 27, 2016_
Logging client replaced with preview version (see below).
_September 8, 2016_
* New clients for some of Google's Machine Learning APIs: Vision, Speech, and
Natural Language.
* Preview version of a new [Stackdriver Logging][cloud-logging] client in
[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
This client uses gRPC as its transport layer, and supports log reading, sinks
and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
88
vendor/cloud.google.com/go/run-tests.sh generated vendored Executable file
View file
@ -0,0 +1,88 @@
#!/bin/bash
# Selectively run tests for this repo, based on what has changed
# in a commit. Runs short tests for the whole repo, and full tests
# for changed directories.
set -e
prefix=cloud.google.com/go
dryrun=false
if [[ $1 == "-n" ]]; then
dryrun=true
shift
fi
if [[ $1 == "" ]]; then
echo >&2 "usage: $0 [-n] COMMIT"
exit 1
fi
# Files or directories that cause all tests to run if modified.
declare -A run_all
run_all=([.travis.yml]=1 [run-tests.sh]=1)
function run {
if $dryrun; then
echo $*
else
(set -x; $*)
fi
}
# Find all the packages that have changed in this commit.
declare -A changed_packages
for f in $(git diff-tree --no-commit-id --name-only -r $1); do
if [[ ${run_all[$f]} == 1 ]]; then
# This change requires a full test. Do it and exit.
run go test -race -v $prefix/...
exit
fi
# Map, e.g., "spanner/client.go" to "$prefix/spanner".
d=$(dirname $f)
if [[ $d == "." ]]; then
pkg=$prefix
else
pkg=$prefix/$d
fi
changed_packages[$pkg]=1
done
echo "changed packages: ${!changed_packages[*]}"
# Reports whether its argument, a package name, depends (recursively)
# on a changed package.
function depends_on_changed_package {
# According to go list, a package does not depend on itself, so
# we test that separately.
if [[ ${changed_packages[$1]} == 1 ]]; then
return 0
fi
for dep in $(go list -f '{{range .Deps}}{{.}} {{end}}' $1); do
if [[ ${changed_packages[$dep]} == 1 ]]; then
return 0
fi
done
return 1
}
# Collect the packages into two separate lists. (It is faster to go test a list of
# packages than to individually go test each one.)
shorts=
fulls=
for pkg in $(go list $prefix/...); do # for each package in the repo
if depends_on_changed_package $pkg; then # if it depends on a changed package
fulls="$fulls $pkg" # run the full test
else # otherwise
shorts="$shorts $pkg" # run the short test
fi
done
run go test -race -v -short $shorts
if [[ $fulls != "" ]]; then
run go test -race -v $fulls
fi

31
vendor/github.com/Azure/go-autorest/.gitignore generated vendored Normal file
View file

@ -0,0 +1,31 @@
# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
.DS_Store
.idea/
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# go-autorest specific
vendor/
autorest/azure/example/example

25
vendor/github.com/Azure/go-autorest/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,25 @@
sudo: false
language: go
go:
- 1.9
- 1.8
- 1.7
- 1.6
install:
- go get -u github.com/golang/lint/golint
- go get -u github.com/Masterminds/glide
- go get -u github.com/stretchr/testify
- go get -u github.com/GoASTScanner/gas
- glide install
script:
- grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee /dev/stderr | test -z "$(< /dev/stdin)"
- test -z "$(gofmt -s -l -w ./autorest/. | tee /dev/stderr)"
- test -z "$(golint ./autorest/... | tee /dev/stderr)"
- go vet ./autorest/...
- test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)"
- go build -v ./autorest/...
- go test -v ./autorest/...

215
vendor/github.com/Azure/go-autorest/CHANGELOG.md generated vendored Normal file
View file

@ -0,0 +1,215 @@
# CHANGELOG
## v9.1.1
- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
## v9.1.0
### New Features
- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
- Support for loading Azure CLI Authentication files.
- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
### Bug Fixes
- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
- Adding missing Apache Headers
## v9.0.0
> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
Adding MSI Endpoint Support and CLI token rehydration.
## v8.3.1
Pick up bug fix in adal for MSI support.
## v8.3.0
Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
## v8.2.0
### New Features
- Add support for bearer authentication callbacks
- Support 429 response codes that include "Retry-After" header
- Support validation constraint "Pattern" for map keys
### Bug Fixes
- Make RetriableRequest work with multiple versions of Go
## v8.1.1
Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
## v8.1.0
Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
## v8.0.0
ADAL refactored into its own package.
Support for UNIX time.
## v7.3.1
- Version Testing now removed from production bits that are shipped with the library.
## v7.3.0
- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
to acknowledge that they do not need either the entire or a trailing portion
of accepts response body. In doing so, Go's http library can reuse HTTP
connections more readily.
- Adding `PrepareDecorator` to target custom BaseURLs.
- Adding ACR suffix to public cloud environment.
- Updating Glide dependencies.
## v7.2.5
- Fixed the Active Directory endpoint for the China cloud.
- Removes UTF-8 BOM if present in response payload.
- Added telemetry.
## v7.2.3
- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
duration.
## v7.2.2
- autorest/azure: added ASM and ARM VM DNS suffixes.
## v7.2.1
- fixed parsing of UTC times that are not RFC3339 conformant.
## v7.2.0
- autorest/validation: Reformat validation error for better error message.
## v7.1.0
- preparer: Added support for multipart formdata - WithMultiPartFormdata()
- preparer: Added support for sending file in request body - WithFile
- client: Added RetryDuration parameter.
- autorest/validation: new package for validation code for Azure Go SDK.
## v7.0.7
- Add trailing / to endpoint
- azure: add EnvironmentFromName
## v7.0.6
- Add retry logic for 408, 500, 502, 503 and 504 status codes.
- Change url path and query encoding logic.
- Fix DelayForBackoff for proper exponential delay.
- Add CookieJar in Client.
## v7.0.5
- Add check to start polling only when status is in [200,201,202].
- Refactoring for unchecked errors.
- azure/persist changes.
- Fix 'file in use' issue in renewing token in deviceflow.
- Store header RetryAfter for subsequent requests in polling.
- Add attribute details in service error.
## v7.0.4
- Better error messages for long running operation failures
## v7.0.3
- Corrected DoPollForAsynchronous to properly handle the initial response
## v7.0.2
- Corrected DoPollForAsynchronous to continue using the polling method first discovered
## v7.0.1
- Fixed empty JSON input error in ByUnmarshallingJSON
- Fixed polling support for GET calls
- Changed format name from TimeRfc1123 to TimeRFC1123
## v7.0.0
- Added ByCopying responder with supporting TeeReadCloser
- Rewrote Azure asynchronous handling
- Reverted to only unmarshalling JSON
- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
`encoding/json` successfully deserializes all core types, and extended types normally provide
their custom JSON serialization handlers, the code has been reverted back to using
`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate
code; there is no loss of function, and there is a gain in accuracy, by reverting.
Additionally, Azure services indicate requests to be polled by multiple means. The existing code
only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
The new code correctly covers all cases and aligns with the other Azure SDKs.
## v6.1.0
- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
## v6.0.0
- Completely reworked the handling of polled and asynchronous requests
- Removed unnecessary routines
- Reworked `mocks.Sender` to replay a series of `http.Response` objects
- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new
`SendDecorators` implement different styles of polled behavior. See`autorest.DoPollForStatusCodes`
and `azure.DoPollForAsynchronous` for examples.
## v5.0.0
- Added new RespondDecorators unmarshalling primitive types
- Corrected application of inspection and authorization PrependDecorators
## v4.0.0
- Added support for Azure long-running operations.
- Added cancelation support to all decorators and functions that may delay.
- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
## v3.1.0
- Add support for OAuth Device Flow authorization.
- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
- Add helpers for persisting and restoring Tokens.
- Increased code coverage in the github.com/Azure/autorest/azure package
## v3.0.0
- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
- Add: XML unmarshaling support with `ByUnmarshallingXML()`
- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
Applications using this library should either use Glide or vendor dependencies locally some other way.
- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
- Fix: use `net/http.DefaultClient` as base client.
- Fix: Missing inspection for polling responses added.
- Add: CopyAndDecode helpers.
- Improved `./autorest/to` with `[]string` helpers.
- Removed golint suppressions in .travis.yml.
## v2.1.0
- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
## v2.0.0
- Changed `to.StringMapPtr` method signature to return a pointer
- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys
## v1.0.0
- Added Logging inspectors to trace http.Request / Response
- Added support for User-Agent header
- Changed WithHeader PrepareDecorator to use set vs. add
- Added JSON to error when unmarshalling fails
- Added Client#Send method
- Corrected case of "Azure" in package paths
- Added "to" helpers, Azure helpers, and improved ease-of-use
- Corrected golint issues
## v1.0.1
- Added CHANGELOG.md
## v1.1.0
- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT
- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate
## v1.1.1
- Introduce godeps and vendor dependencies introduced in v1.1.1

23
vendor/github.com/Azure/go-autorest/GNUmakefile generated vendored Normal file
View file

@ -0,0 +1,23 @@
DIR?=./autorest/
default: build
build: fmt
go install $(DIR)
test:
go test $(DIR) || exit 1
vet:
@echo "go vet ."
@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \
exit 1; \
fi
fmt:
gofmt -w $(DIR)
.PHONY: build test vet fmt

191
vendor/github.com/Azure/go-autorest/LICENSE generated vendored Normal file
View file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

132
vendor/github.com/Azure/go-autorest/README.md generated vendored Normal file
View file

@ -0,0 +1,132 @@
# go-autorest
[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) [![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) [![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest)
## Usage
Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
generated Go code.
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
and Responding. A typical pattern is:
```go
req, err := Prepare(&http.Request{},
token.WithAuthorization())
resp, err := Send(req,
WithLogging(logger),
DoErrorIfStatusCode(http.StatusInternalServerError),
DoCloseIfError(),
DoRetryForAttempts(5, time.Second))
err = Respond(resp,
ByDiscardingBody(),
ByClosing())
```
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
and then pass the data along, pass the data first and then modify the result, or wrap themselves
around passing the data (such as a logger might do). Decorators run in the order provided. For
example, the following:
```go
req, err := Prepare(&http.Request{},
WithBaseURL("https://microsoft.com/"),
WithPath("a"),
WithPath("b"),
WithPath("c"))
```
will set the URL to:
```
https://microsoft.com/a/b/c
```
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
all bound together by means of input / output channels.
Decorators hold their passed state within a closure (such as the path components in the example
above). Be careful to share Preparers and Responders only in a context where such held state
applies. For example, it may not make sense to share a Preparer that applies a query string from a
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
struct (e.g., `ByUnmarshallingJSON`) is likely incorrect.
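To make the sharing guidance concrete, here is a minimal sketch (not part of the original text, written in the same abbreviated style as the examples above) that builds one reusable Preparer and applies it per request; the base URL and path are placeholders:
```go
p := CreatePreparer(
    WithBaseURL("https://microsoft.com/"),
    WithPath("a"))

// The decorators above close over only immutable state (the base URL and
// path), so this Preparer can be shared safely across goroutines.
req, err := p.Prepare(&http.Request{})
```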
Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
See the included examples for more detail. For details on the suggested use of this package by
generated clients, see the Client described below.
## Helpers
### Handling Swagger Dates
The Swagger specification (https://swagger.io) that drives AutoRest
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
parsing and formatting.
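For instance, a minimal sketch (not part of the original text, assuming the `ParseDate` helper and `Date` type from that package):
```go
d, err := date.ParseDate("2017-10-23") // Swagger "date" form: yyyy-mm-dd
if err != nil {
    // handle the parse error
}
fmt.Println(d.String()) // renders as 2017-10-23
```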
### Handling Empty Values
In JSON, missing values have different semantics than empty values. This is especially true for
services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
only those values to modify. Missing values are to be left unchanged. Developers, then, require a
means to both specify an empty value and to leave the value out of the submitted JSON.
The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
the Go base types encoded through the default JSON package, it is not possible to create JSON to
clear a value at the server.
The workaround within the Go community is to use pointers to base types in lieu of base types within
structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
`*string`. While this enables distinguishing empty values from those to be unchanged, creating
pointers to a base type (notably constant, in-line values) requires additional variables. This, for
example,
```go
s := struct {
S *string
}{ S: &"foo" }
```
fails, while, this
```go
v := "foo"
s := struct {
S *string
}{ S: &v }
```
succeeds.
To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
Go base types which have Swagger analogs. It also provides a helper that converts between
`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
associated with a key should be cleared. With the helpers, the previous example becomes
```go
s := struct {
S *string
}{ S: to.StringPtr("foo") }
```
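The map helper can be sketched the same way (illustrative only, not from the original text):
```go
// StringMapPtr converts each value to a *string and returns a pointer to
// the resulting map[string]*string.
tags := to.StringMapPtr(map[string]string{
    "environment": "staging",
    "obsolete":    "",
})
```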
## Install
```bash
go get github.com/Azure/go-autorest/autorest
go get github.com/Azure/go-autorest/autorest/azure
go get github.com/Azure/go-autorest/autorest/date
go get github.com/Azure/go-autorest/autorest/to
```
## License
See LICENSE file.
-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View file

@ -0,0 +1,253 @@
# Azure Active Directory library for Go
This project provides a standalone Azure Active Directory library for Go. The code was extracted
from [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for
[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go).
## Installation
```
go get -u github.com/Azure/go-autorest/autorest/adal
```
## Usage
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications), or by using the [Azure CLI](https://github.com/Azure/azure-cli).
### Register an Azure AD Application with secret
1. Register a new application with a `secret` credential
```
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--password secret
```
2. Create a service principal using the `Application ID` from previous step
```
az ad sp create --id "Application ID"
```
* Replace `Application ID` with `appId` from step 1.
### Register an Azure AD Application with certificate
1. Create a private key
```
openssl genrsa -out "example-app.key" 2048
```
2. Create the certificate
```
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
```
3. Create the PKCS12 version of the certificate, which also contains the private key
```
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
```
4. Register a new application with the certificate content from `example-app.crt`
```
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--key-usage Verify --end-date 2018-01-01 \
--key-value "${certificateContents}"
```
5. Create a service principal using the `Application ID` from previous step
```
az ad sp create --id "APPLICATION_ID"
```
* Replace `APPLICATION_ID` with `appId` from step 4.
### Grant the necessary permissions
Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
which can be assigned to a service principal of an Azure AD application, depending on your needs.
```
az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
```
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
* Replace the `ROLE_NAME` with a role name of your choice.
It is also possible to define custom role definitions.
```
az role definition create --role-definition role-definition.json
```
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
### Acquire Access Token
The common configuration used by all flows:
```Go
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
tenantID := "TENANT_ID"
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
applicationID := "APPLICATION_ID"
callback := func(token adal.Token) error {
// This is called after the token is acquired
}
// The resource for which the token is acquired
resource := "https://management.core.windows.net/"
```
* Replace the `TENANT_ID` with your tenant ID.
* Replace the `APPLICATION_ID` with the value from the previous section.
#### Client Credentials
```Go
applicationSecret := "APPLICATION_SECRET"
spt, err := adal.NewServicePrincipalToken(
oauthConfig,
applicationID,
applicationSecret,
resource,
callbacks...)
if err != nil {
return nil, err
}
// Acquire a new access token
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
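As a follow-on sketch (not part of the upstream README), the `spt` obtained above can be used to authorize requests made through the parent `autorest` package; the user agent string is only a placeholder:
```Go
client := autorest.NewClientWithUserAgent("example-agent")
client.Authorizer = autorest.NewBearerAuthorizer(spt)
// Requests prepared with client.WithAuthorization() now carry the bearer token.
```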
#### Client Certificate
```Go
certificatePath := "./example-app.pfx"
certData, err := ioutil.ReadFile(certificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
}
// Get the certificate and private key from pfx file
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
if err != nil {
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}
spt, err := adal.NewServicePrincipalTokenFromCertificate(
oauthConfig,
applicationID,
certificate,
rsaPrivateKey,
resource,
callbacks...)
// Acquire a new access token
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
#### Device Code
```Go
oauthClient := &http.Client{}
// Acquire the device code
deviceCode, err := adal.InitiateDeviceAuth(
oauthClient,
oauthConfig,
applicationID,
resource)
if err != nil {
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
}
// Display the authentication message
fmt.Println(*deviceCode.Message)
// Wait here until the user is authenticated
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
if err != nil {
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
oauthConfig,
applicationID,
resource,
*token,
callbacks...)
if err == nil {
token := spt.Token
}
```
### Command Line Tool
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
```
adal -h
Usage of ./adal:
-applicationId string
application id
-certificatePath string
path to pk12/PFC application certificate
-mode string
authentication mode (device, secret, cert, refresh) (default "device")
-resource string
resource for which the token is requested
-secret string
application secret
-tenantId string
tenant id
-tokenCachePath string
location of oath token cache (default "/home/cgc/.adal/accessToken.json")
```
Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
```
adal -mode device \
-applicationId "APPLICATION_ID" \
-tenantId "TENANT_ID" \
-resource https://management.core.windows.net/
```

View file

@ -0,0 +1,65 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/url"
)
const (
activeDirectoryAPIVersion = "1.0"
)
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
AuthorityEndpoint url.URL
AuthorizeEndpoint url.URL
TokenEndpoint url.URL
DeviceCodeEndpoint url.URL
}
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
}
authorityURL, err := u.Parse(tenantID)
if err != nil {
return nil, err
}
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
if err != nil {
return nil, err
}
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
if err != nil {
return nil, err
}
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
if err != nil {
return nil, err
}
return &OAuthConfig{
AuthorityEndpoint: *authorityURL,
AuthorizeEndpoint: *authorizeURL,
TokenEndpoint: *tokenURL,
DeviceCodeEndpoint: *deviceCodeURL,
}, nil
}

View file

@ -0,0 +1,44 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"testing"
)
func TestNewOAuthConfig(t *testing.T) {
const testActiveDirectoryEndpoint = "https://login.test.com"
const testTenantID = "tenant-id-test"
config, err := NewOAuthConfig(testActiveDirectoryEndpoint, testTenantID)
if err != nil {
t.Fatalf("autorest/adal: Unexpected error while creating oauth configuration for tenant: %v.", err)
}
expected := "https://login.test.com/tenant-id-test/oauth2/authorize?api-version=1.0"
if config.AuthorizeEndpoint.String() != expected {
t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.AuthorizeEndpoint)
}
expected = "https://login.test.com/tenant-id-test/oauth2/token?api-version=1.0"
if config.TokenEndpoint.String() != expected {
t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.TokenEndpoint)
}
expected = "https://login.test.com/tenant-id-test/oauth2/devicecode?api-version=1.0"
if config.DeviceCodeEndpoint.String() != expected {
t.Fatalf("autorest/adal Incorrect devicecode url for Tenant from Environment. expected(%s). actual(%v).", expected, config.DeviceCodeEndpoint)
}
}

View file

@ -0,0 +1,242 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
This file is largely based on rjw57/oauth2device's code, with the following differences:
* scope -> resource, and only allow a single one
* receive "Message" in the DeviceCode struct and show it to users as the prompt
* azure-xplat-cli has the following behavior that this emulates:
- does not send client_secret during the token exchange
- sends resource again in the token exchange request
*/
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
)
const (
logPrefix = "autorest/adal/devicetoken:"
)
var (
// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
errTokenSendingFails = "Error occurred while sending request with device code for a token"
errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
errStatusNotOK = "Error HTTP status != 200"
)
// DeviceCode is the object returned by the device auth endpoint
// It contains information to instruct the user to complete the auth flow
type DeviceCode struct {
DeviceCode *string `json:"device_code,omitempty"`
UserCode *string `json:"user_code,omitempty"`
VerificationURL *string `json:"verification_url,omitempty"`
ExpiresIn *int64 `json:"expires_in,string,omitempty"`
Interval *int64 `json:"interval,string,omitempty"`
Message *string `json:"message"` // Azure specific
Resource string // store the following, stored when initiating, used when exchanging
OAuthConfig OAuthConfig
ClientID string
}
// TokenError is the object returned by the token exchange endpoint
// when something is amiss
type TokenError struct {
Error *string `json:"error,omitempty"`
ErrorCodes []int `json:"error_codes,omitempty"`
ErrorDescription *string `json:"error_description,omitempty"`
Timestamp *string `json:"timestamp,omitempty"`
TraceID *string `json:"trace_id,omitempty"`
}
// deviceToken is the object returned by the token exchange endpoint
// It can either look like a Token or a TokenError, so put both here
// and check for the presence of "Error" to know if we are in an error state
type deviceToken struct {
Token
TokenError
}
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
v := url.Values{
"client_id": []string{clientID},
"resource": []string{resource},
}
s := v.Encode()
body := ioutil.NopCloser(strings.NewReader(s))
req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
}
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
resp, err := sender.Do(req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
}
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
}
if len(strings.Trim(string(rb), " ")) == 0 {
return nil, ErrDeviceCodeEmpty
}
var code DeviceCode
err = json.Unmarshal(rb, &code)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
}
code.ClientID = clientID
code.Resource = resource
code.OAuthConfig = oauthConfig
return &code, nil
}
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has been completed, timed out, or otherwise failed
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
v := url.Values{
"client_id": []string{code.ClientID},
"code": []string{*code.DeviceCode},
"grant_type": []string{OAuthGrantTypeDeviceCode},
"resource": []string{code.Resource},
}
s := v.Encode()
body := ioutil.NopCloser(strings.NewReader(s))
req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
}
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
resp, err := sender.Do(req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
}
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
}
if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
}
if len(strings.Trim(string(rb), " ")) == 0 {
return nil, ErrOAuthTokenEmpty
}
var token deviceToken
err = json.Unmarshal(rb, &token)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
}
if token.Error == nil {
return &token.Token, nil
}
switch *token.Error {
case "authorization_pending":
return nil, ErrDeviceAuthorizationPending
case "slow_down":
return nil, ErrDeviceSlowDown
case "access_denied":
return nil, ErrDeviceAccessDenied
case "code_expired":
return nil, ErrDeviceCodeExpired
default:
return nil, ErrDeviceGeneric
}
}
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
intervalDuration := time.Duration(*code.Interval) * time.Second
waitDuration := intervalDuration
for {
token, err := CheckForUserCompletion(sender, code)
if err == nil {
return token, nil
}
switch err {
case ErrDeviceSlowDown:
waitDuration += waitDuration
case ErrDeviceAuthorizationPending:
// noop
default: // everything else is "fatal" to us
return nil, err
}
if waitDuration > (intervalDuration * 3) {
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
}
time.Sleep(waitDuration)
}
}

View file

@ -0,0 +1,330 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/mocks"
)
const (
TestResource = "SomeResource"
TestClientID = "SomeClientID"
TestTenantID = "SomeTenantID"
TestActiveDirectoryEndpoint = "https://login.test.com/"
)
var (
testOAuthConfig, _ = NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID)
TestOAuthConfig = *testOAuthConfig
)
const MockDeviceCodeResponse = `
{
"device_code": "10000-40-1234567890",
"user_code": "ABCDEF",
"verification_url": "http://aka.ms/deviceauth",
"expires_in": "900",
"interval": "0"
}
`
const MockDeviceTokenResponse = `{
"access_token": "accessToken",
"refresh_token": "refreshToken",
"expires_in": "1000",
"expires_on": "2000",
"not_before": "3000",
"resource": "resource",
"token_type": "type"
}
`
func TestDeviceCodeIncludesResource(t *testing.T) {
sender := mocks.NewSender()
sender.AppendResponse(mocks.NewResponseWithContent(MockDeviceCodeResponse))
code, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource)
if err != nil {
t.Fatalf("adal: unexpected error initiating device auth")
}
if code.Resource != TestResource {
t.Fatalf("adal: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct")
}
}
func TestDeviceCodeReturnsErrorIfSendingFails(t *testing.T) {
sender := mocks.NewSender()
sender.SetError(fmt.Errorf("this is an error"))
_, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource)
if err == nil || !strings.Contains(err.Error(), errCodeSendingFails) {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error())
}
}
func TestDeviceCodeReturnsErrorIfBadRequest(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody("doesn't matter")
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request"))
_, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource)
if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) {
gibberishJSON := strings.Replace(MockDeviceCodeResponse, "expires_in", "\":, :gibberish", -1)
sender := mocks.NewSender()
body := mocks.NewBody(gibberishJSON)
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK"))
_, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource)
if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceCodeReturnsErrorIfEmptyDeviceCode(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody("")
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK"))
_, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource)
if err != ErrDeviceCodeEmpty {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", ErrDeviceCodeEmpty, err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func deviceCode() *DeviceCode {
var deviceCode DeviceCode
_ = json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode)
deviceCode.Resource = TestResource
deviceCode.ClientID = TestClientID
return &deviceCode
}
func TestDeviceTokenReturns(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody(MockDeviceTokenResponse)
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err != nil {
t.Fatalf("adal: got error unexpectedly")
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceTokenReturnsErrorIfSendingFails(t *testing.T) {
sender := mocks.NewSender()
sender.SetError(fmt.Errorf("this is an error"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err == nil || !strings.Contains(err.Error(), errTokenSendingFails) {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error())
}
}
func TestDeviceTokenReturnsErrorIfServerError(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody("")
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusInternalServerError, "Internal Server Error"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) {
gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", ";:\"gibberish", -1)
sender := mocks.NewSender()
body := mocks.NewBody(gibberishJSON)
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) {
t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func errorDeviceTokenResponse(message string) string {
return `{ "error": "` + message + `" }`
}
func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody(errorDeviceTokenResponse("authorization_pending"))
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request"))
_, err := CheckForUserCompletion(sender, deviceCode())
if err != ErrDeviceAuthorizationPending {
t.Fatalf("!!!")
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceTokenReturnsErrorIfSlowDown(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody(errorDeviceTokenResponse("slow_down"))
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request"))
_, err := CheckForUserCompletion(sender, deviceCode())
if err != ErrDeviceSlowDown {
t.Fatalf("!!!")
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
type deviceTokenSender struct {
errorString string
attempts int
}
func newDeviceTokenSender(deviceErrorString string) *deviceTokenSender {
return &deviceTokenSender{errorString: deviceErrorString, attempts: 0}
}
func (s *deviceTokenSender) Do(req *http.Request) (*http.Response, error) {
var resp *http.Response
if s.attempts < 1 {
s.attempts++
resp = mocks.NewResponseWithContent(errorDeviceTokenResponse(s.errorString))
} else {
resp = mocks.NewResponseWithContent(MockDeviceTokenResponse)
}
return resp, nil
}
// since the above only exercise CheckForUserCompletion, we repeat the test here,
// but with the intent of showing that WaitForUserCompletion loops properly.
func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) {
sender := newDeviceTokenSender("authorization_pending")
_, err := WaitForUserCompletion(sender, deviceCode())
if err != nil {
t.Fatalf("unexpected error occurred")
}
}
// same as above but with SlowDown now
func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) {
sender := newDeviceTokenSender("slow_down")
_, err := WaitForUserCompletion(sender, deviceCode())
if err != nil {
t.Fatalf("unexpected error occurred")
}
}
func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody(errorDeviceTokenResponse("access_denied"))
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err != ErrDeviceAccessDenied {
t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody(errorDeviceTokenResponse("code_expired"))
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err != ErrDeviceCodeExpired {
t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceTokenReturnsErrorForUnknownError(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody(errorDeviceTokenResponse("unknown_error"))
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err == nil {
t.Fatalf("failed to get error")
}
if err != ErrDeviceGeneric {
t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}
func TestDeviceTokenReturnsErrorIfTokenEmptyAndStatusOK(t *testing.T) {
sender := mocks.NewSender()
body := mocks.NewBody("")
sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK"))
_, err := WaitForUserCompletion(sender, deviceCode())
if err != ErrOAuthTokenEmpty {
t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrOAuthTokenEmpty.Error(), err.Error())
}
if body.IsOpen() {
t.Fatalf("response body was left open!")
}
}

View file

@ -0,0 +1,20 @@
// +build !windows
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// msiPath is the path to the MSI Extension settings file (to discover the endpoint)
var msiPath = "/var/lib/waagent/ManagedIdentity-Settings"

View file

@ -0,0 +1,25 @@
// +build windows
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"os"
"strings"
)
// msiPath is the path to the MSI Extension settings file (to discover the endpoint)
var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/")

View file

@ -0,0 +1,73 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// LoadToken restores a Token object from a file located at 'path'.
func LoadToken(path string) (*Token, error) {
file, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
}
defer file.Close()
var token Token
dec := json.NewDecoder(file)
if err = dec.Decode(&token); err != nil {
return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
}
return &token, nil
}
// SaveToken persists an oauth token at the given location on disk.
// It moves the new file into place so it can safely be used to replace an existing file
// that may be accessed by multiple processes.
func SaveToken(path string, mode os.FileMode, token Token) error {
dir := filepath.Dir(path)
err := os.MkdirAll(dir, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
}
newFile, err := ioutil.TempFile(dir, "token")
if err != nil {
return fmt.Errorf("failed to create the temp file to write the token: %v", err)
}
tempPath := newFile.Name()
if err := json.NewEncoder(newFile).Encode(token); err != nil {
return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
}
if err := newFile.Close(); err != nil {
return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
}
// Atomic replace to avoid multi-writer file corruptions
if err := os.Rename(tempPath, path); err != nil {
return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
}
if err := os.Chmod(path, mode); err != nil {
return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
}
return nil
}
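A minimal sketch of how LoadToken and SaveToken might back a simple on-disk token cache; the cache path is caller-supplied, and the Token type with its IsExpired helper comes from this package's token.go.

// loadCachedToken is an illustrative helper: it returns a previously saved
// token only when the cache file exists and the token has not yet expired.
func loadCachedToken(cachePath string) (*Token, bool) {
    token, err := LoadToken(cachePath)
    if err != nil || token.IsExpired() {
        return nil, false
    }
    return token, true
}

// cacheToken persists a freshly refreshed token with owner-only permissions.
func cacheToken(cachePath string, token Token) error {
    return SaveToken(cachePath, 0600, token)
}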

View file

@@ -0,0 +1,171 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"reflect"
"runtime"
"strings"
"testing"
)
const MockTokenJSON string = `{
"access_token": "accessToken",
"refresh_token": "refreshToken",
"expires_in": "1000",
"expires_on": "2000",
"not_before": "3000",
"resource": "resource",
"token_type": "type"
}`
var TestToken = Token{
AccessToken: "accessToken",
RefreshToken: "refreshToken",
ExpiresIn: "1000",
ExpiresOn: "2000",
NotBefore: "3000",
Resource: "resource",
Type: "type",
}
func writeTestTokenFile(t *testing.T, suffix string, contents string) *os.File {
f, err := ioutil.TempFile(os.TempDir(), suffix)
if err != nil {
t.Fatalf("azure: unexpected error when creating temp file: %v", err)
}
defer f.Close()
_, err = f.Write([]byte(contents))
if err != nil {
t.Fatalf("azure: unexpected error when writing temp test file: %v", err)
}
return f
}
func TestLoadToken(t *testing.T) {
f := writeTestTokenFile(t, "testloadtoken", MockTokenJSON)
defer os.Remove(f.Name())
expectedToken := TestToken
actualToken, err := LoadToken(f.Name())
if err != nil {
t.Fatalf("azure: unexpected error loading token from file: %v", err)
}
if *actualToken != expectedToken {
t.Fatalf("azure: failed to decode properly expected(%v) actual(%v)", expectedToken, *actualToken)
}
// test that LoadToken closes the file properly
err = SaveToken(f.Name(), 0600, *actualToken)
if err != nil {
t.Fatalf("azure: could not save token after LoadToken: %v", err)
}
}
func TestLoadTokenFailsBadPath(t *testing.T) {
_, err := LoadToken("/tmp/this_file_should_never_exist_really")
expectedSubstring := "failed to open file"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error())
}
}
func TestLoadTokenFailsBadJson(t *testing.T) {
gibberishJSON := strings.Replace(MockTokenJSON, "expires_on", ";:\"gibberish", -1)
f := writeTestTokenFile(t, "testloadtokenfailsbadjson", gibberishJSON)
defer os.Remove(f.Name())
_, err := LoadToken(f.Name())
expectedSubstring := "failed to decode contents of file"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error())
}
}
func token() *Token {
var token Token
json.Unmarshal([]byte(MockTokenJSON), &token)
return &token
}
func TestSaveToken(t *testing.T) {
f, err := ioutil.TempFile("", "testloadtoken")
if err != nil {
t.Fatalf("azure: unexpected error when creating temp file: %v", err)
}
defer os.Remove(f.Name())
f.Close()
mode := os.ModePerm & 0642
err = SaveToken(f.Name(), mode, *token())
if err != nil {
t.Fatalf("azure: unexpected error saving token to file: %v", err)
}
fi, err := os.Stat(f.Name()) // open a new stat as held ones are not fresh
if err != nil {
t.Fatalf("azure: stat failed: %v", err)
}
if runtime.GOOS != "windows" { // permissions don't work on Windows
if perm := fi.Mode().Perm(); perm != mode {
t.Fatalf("azure: wrong file perm. got:%s; expected:%s file :%s", perm, mode, f.Name())
}
}
var actualToken Token
var expectedToken Token
json.Unmarshal([]byte(MockTokenJSON), &expectedToken)
contents, err := ioutil.ReadFile(f.Name())
if err != nil {
t.Fatalf("azure: failed to read token file %s: %v", f.Name(), err)
}
json.Unmarshal(contents, &actualToken)
if !reflect.DeepEqual(actualToken, expectedToken) {
t.Fatal("azure: token was not serialized correctly")
}
}
func TestSaveTokenFailsNoPermission(t *testing.T) {
pathWhereWeShouldntHavePermission := "/usr/thiswontwork/atall"
if runtime.GOOS == "windows" {
pathWhereWeShouldntHavePermission = path.Join(os.Getenv("windir"), "system32\\mytokendir\\mytoken")
}
err := SaveToken(pathWhereWeShouldntHavePermission, 0644, *token())
expectedSubstring := "failed to create directory"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err)
}
}
func TestSaveTokenFailsCantCreate(t *testing.T) {
tokenPath := "/thiswontwork"
if runtime.GOOS == "windows" {
tokenPath = path.Join(os.Getenv("windir"), "system32")
}
err := SaveToken(tokenPath, 0644, *token())
expectedSubstring := "failed to create the temp file to write the token"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err)
}
}

View file

@@ -0,0 +1,60 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/http"
)
const (
contentType = "Content-Type"
mimeTypeFormPost = "application/x-www-form-urlencoded"
)
// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
type Sender interface {
Do(*http.Request) (*http.Response, error)
}
// SenderFunc is a method that implements the Sender interface.
type SenderFunc func(*http.Request) (*http.Response, error)
// Do implements the Sender interface on SenderFunc.
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r)
}
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
return DecorateSender(&http.Client{}, decorators...)
}
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
// the Sender. Decorators are applied in the order received, but their effect upon the request
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
// post-decorator (pass the http.Request along and react to the results in http.Response).
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
for _, decorate := range decorators {
s = decorate(s)
}
return s
}
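As a hedged sketch of the decorator pattern described above, the following SendDecorator logs each outgoing request before delegating to the wrapped Sender; it assumes the standard library "log" package is added to this file's imports.

// withRequestLogging is an illustrative SendDecorator: it wraps a Sender so
// that every request's method and URL are logged before the request is sent.
func withRequestLogging() SendDecorator {
    return func(s Sender) Sender {
        return SenderFunc(func(r *http.Request) (*http.Response, error) {
            log.Printf("sending %s %s", r.Method, r.URL) // assumes "log" is imported
            return s.Do(r)
        })
    }
}

A caller could then build a decorated client with CreateSender(withRequestLogging()), or wrap an existing client via DecorateSender.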

View file

@@ -0,0 +1,427 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/dgrijalva/jwt-go"
)
const (
defaultRefresh = 5 * time.Minute
// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
OAuthGrantTypeDeviceCode = "device_code"
// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
OAuthGrantTypeClientCredentials = "client_credentials"
// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
OAuthGrantTypeRefreshToken = "refresh_token"
// metadataHeader is the header required by MSI extension
metadataHeader = "Metadata"
)
// OAuthTokenProvider is an interface which should be implemented by an access token retriever
type OAuthTokenProvider interface {
OAuthToken() string
}
// Refresher is an interface for token refresh functionality
type Refresher interface {
Refresh() error
RefreshExchange(resource string) error
EnsureFresh() error
}
// TokenRefreshCallback is the type representing callbacks that will be called after
// a successful token refresh
type TokenRefreshCallback func(Token) error
// Token encapsulates the access token used to authorize Azure requests.
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn string `json:"expires_in"`
ExpiresOn string `json:"expires_on"`
NotBefore string `json:"not_before"`
Resource string `json:"resource"`
Type string `json:"token_type"`
}
// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
s, err := strconv.Atoi(t.ExpiresOn)
if err != nil {
s = -3600
}
expiration := date.NewUnixTimeFromSeconds(float64(s))
return time.Time(expiration).UTC()
}
// IsExpired returns true if the Token is expired, false otherwise.
func (t Token) IsExpired() bool {
return t.WillExpireIn(0)
}
// WillExpireIn returns true if the Token will expire within the passed time.Duration interval
// from now, false otherwise.
func (t Token) WillExpireIn(d time.Duration) bool {
return !t.Expires().After(time.Now().Add(d))
}
// OAuthToken returns the current access token.
func (t *Token) OAuthToken() string {
return t.AccessToken
}
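A small illustrative check built on the helpers above: it treats any token that expires within the next five minutes as stale (the five-minute window mirrors defaultRefresh and is only an example).

// needsRefresh is an illustrative helper; WillExpireIn(0) is what IsExpired
// uses, and a positive window triggers a refresh slightly before expiry.
func needsRefresh(t Token) bool {
    return t.WillExpireIn(5 * time.Minute)
}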
// ServicePrincipalNoSecret represents a secret type that contains no secret,
// meaning it is not valid for fetching a fresh token. It is used by manually created tokens.
type ServicePrincipalNoSecret struct {
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
// It only returns an error for the ServicePrincipalNoSecret type
func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
}
// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
// that is submitted when acquiring an oAuth token.
type ServicePrincipalSecret interface {
SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
}
// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
type ServicePrincipalTokenSecret struct {
ClientSecret string
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
v.Set("client_secret", tokenSecret.ClientSecret)
return nil
}
// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
type ServicePrincipalCertificateSecret struct {
Certificate *x509.Certificate
PrivateKey *rsa.PrivateKey
}
// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
type ServicePrincipalMSISecret struct {
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
return nil
}
// SignJwt returns the JWT signed with the certificate's private key.
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
hasher := sha1.New()
_, err := hasher.Write(secret.Certificate.Raw)
if err != nil {
return "", err
}
thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
// The jti (JWT ID) claim provides a unique identifier for the JWT.
jti := make([]byte, 20)
_, err = rand.Read(jti)
if err != nil {
return "", err
}
token := jwt.New(jwt.SigningMethodRS256)
token.Header["x5t"] = thumbprint
token.Claims = jwt.MapClaims{
"aud": spt.oauthConfig.TokenEndpoint.String(),
"iss": spt.clientID,
"sub": spt.clientID,
"jti": base64.URLEncoding.EncodeToString(jti),
"nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour * 24).Unix(),
}
signedString, err := token.SignedString(secret.PrivateKey)
return signedString, err
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
jwt, err := secret.SignJwt(spt)
if err != nil {
return err
}
v.Set("client_assertion", jwt)
v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
return nil
}
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
Token
secret ServicePrincipalSecret
oauthConfig OAuthConfig
clientID string
resource string
autoRefresh bool
refreshWithin time.Duration
sender Sender
refreshCallbacks []TokenRefreshCallback
}
// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
spt := &ServicePrincipalToken{
oauthConfig: oauthConfig,
secret: secret,
clientID: id,
resource: resource,
autoRefresh: true,
refreshWithin: defaultRefresh,
sender: &http.Client{},
refreshCallbacks: callbacks,
}
return spt, nil
}
// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
spt, err := NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalNoSecret{},
callbacks...)
if err != nil {
return nil, err
}
spt.Token = token
return spt, nil
}
// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
// credentials scoped to the named resource.
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalTokenSecret{
ClientSecret: secret,
},
callbacks...,
)
}
// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes.
func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalCertificateSecret{
PrivateKey: privateKey,
Certificate: certificate,
},
callbacks...,
)
}
// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
func GetMSIVMEndpoint() (string, error) {
return getMSIVMEndpoint(msiPath)
}
func getMSIVMEndpoint(path string) (string, error) {
// Read MSI settings
bytes, err := ioutil.ReadFile(path)
if err != nil {
return "", err
}
msiSettings := struct {
URL string `json:"url"`
}{}
err = json.Unmarshal(bytes, &msiSettings)
if err != nil {
return "", err
}
return msiSettings.URL, nil
}
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
// We set the oauth config token endpoint to be MSI's endpoint
msiEndpointURL, err := url.Parse(msiEndpoint)
if err != nil {
return nil, err
}
oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "")
if err != nil {
return nil, err
}
spt := &ServicePrincipalToken{
oauthConfig: *oauthConfig,
secret: &ServicePrincipalMSISecret{},
resource: resource,
autoRefresh: true,
refreshWithin: defaultRefresh,
sender: &http.Client{},
refreshCallbacks: callbacks,
}
return spt, nil
}
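A hedged sketch of the MSI flow on an Azure VM: discover the extension endpoint from the settings file, then build a token scoped to a resource (the resource URL below is a placeholder).

// exampleMSIToken is illustrative only; real callers would pass their own
// resource identifier and, optionally, refresh callbacks.
func exampleMSIToken() (*ServicePrincipalToken, error) {
    endpoint, err := GetMSIVMEndpoint()
    if err != nil {
        return nil, err
    }
    return NewServicePrincipalTokenFromMSI(endpoint, "https://management.azure.com/")
}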
// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on.
func (spt *ServicePrincipalToken) EnsureFresh() error {
if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) {
return spt.Refresh()
}
return nil
}
// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
if spt.refreshCallbacks != nil {
for _, callback := range spt.refreshCallbacks {
err := callback(spt.Token)
if err != nil {
return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
}
}
}
return nil
}
// Refresh obtains a fresh token for the Service Principal.
func (spt *ServicePrincipalToken) Refresh() error {
return spt.refreshInternal(spt.resource)
}
// RefreshExchange refreshes the token, but for a different resource.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
return spt.refreshInternal(resource)
}
func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
v := url.Values{}
v.Set("client_id", spt.clientID)
v.Set("resource", resource)
if spt.RefreshToken != "" {
v.Set("grant_type", OAuthGrantTypeRefreshToken)
v.Set("refresh_token", spt.RefreshToken)
} else {
v.Set("grant_type", OAuthGrantTypeClientCredentials)
err := spt.secret.SetAuthenticationValues(spt, &v)
if err != nil {
return err
}
}
s := v.Encode()
body := ioutil.NopCloser(strings.NewReader(s))
req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body)
if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok {
req.Header.Set(metadataHeader, "true")
}
resp, err := spt.sender.Do(req)
if err != nil {
return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
}
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
if err != nil {
return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode)
}
return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb))
}
if err != nil {
return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
}
if len(strings.Trim(string(rb), " ")) == 0 {
return fmt.Errorf("adal: Empty service principal token received during refresh")
}
var token Token
err = json.Unmarshal(rb, &token)
if err != nil {
return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb))
}
spt.Token = token
return spt.InvokeRefreshCallbacks(token)
}
// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
spt.autoRefresh = autoRefresh
}
// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
// refresh the token.
func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
spt.refreshWithin = d
return
}
// SetSender sets the http.Client used when obtaining the Service Principal token. An
// undecorated http.Client is used by default.
func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }

View file

@@ -0,0 +1,654 @@
package adal
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"io/ioutil"
"math/big"
"net/http"
"net/url"
"os"
"reflect"
"strconv"
"strings"
"testing"
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/mocks"
)
const (
defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource"
defaultManualFormData = "client_id=id&grant_type=refresh_token&refresh_token=refreshtoken&resource=resource"
)
func TestTokenExpires(t *testing.T) {
tt := time.Now().Add(5 * time.Second)
tk := newTokenExpiresAt(tt)
if tk.Expires().Equal(tt) {
t.Fatalf("adal: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt)
}
}
func TestTokenIsExpired(t *testing.T) {
tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second))
if !tk.IsExpired() {
t.Fatalf("adal: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v",
time.Now().UTC(), tk.Expires())
}
}
func TestTokenIsExpiredUninitialized(t *testing.T) {
tk := &Token{}
if !tk.IsExpired() {
t.Fatalf("adal: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires())
}
}
func TestTokenIsNotExpired(t *testing.T) {
tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second))
if tk.IsExpired() {
t.Fatalf("adal: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires())
}
}
func TestTokenWillExpireIn(t *testing.T) {
d := 5 * time.Second
tk := newTokenExpiresIn(d)
if !tk.WillExpireIn(d) {
t.Fatal("adal: Token#WillExpireIn mismeasured expiration time")
}
}
func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) {
spt := newServicePrincipalToken()
if !spt.autoRefresh {
t.Fatal("adal: ServicePrincipalToken did not default to automatic token refreshing")
}
spt.SetAutoRefresh(false)
if spt.autoRefresh {
t.Fatal("adal: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing")
}
}
func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) {
spt := newServicePrincipalToken()
if spt.refreshWithin != defaultRefresh {
t.Fatal("adal: ServicePrincipalToken did not correctly set the default refresh interval")
}
spt.SetRefreshWithin(2 * defaultRefresh)
if spt.refreshWithin != 2*defaultRefresh {
t.Fatal("adal: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval")
}
}
func TestServicePrincipalTokenSetSender(t *testing.T) {
spt := newServicePrincipalToken()
c := &http.Client{}
spt.SetSender(c)
if !reflect.DeepEqual(c, spt.sender) {
t.Fatal("adal: ServicePrincipalToken#SetSender did not set the sender")
}
}
func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) {
spt := newServicePrincipalToken()
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
if r.Method != "POST" {
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method)
}
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
}
if body.IsOpen() {
t.Fatalf("the response was not closed!")
}
}
func TestServicePrincipalTokenFromMSIRefreshUsesPOST(t *testing.T) {
resource := "https://resource"
cb := func(token Token) error { return nil }
spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb)
if err != nil {
t.Fatalf("Failed to get MSI SPT: %v", err)
}
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
if r.Method != "POST" {
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method)
}
if h := r.Header.Get("Metadata"); h != "true" {
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Metadata header for MSI")
}
return resp, nil
})
}
})())
spt.SetSender(s)
err = spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
}
if body.IsOpen() {
t.Fatalf("the response was not closed!")
}
}
func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) {
spt := newServicePrincipalToken()
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" {
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v",
"application/x-form-urlencoded",
r.Header.Get(http.CanonicalHeaderKey("Content-Type")))
}
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
}
}
func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) {
spt := newServicePrincipalToken()
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
if r.URL.String() != TestOAuthConfig.TokenEndpoint.String() {
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v",
TestOAuthConfig.TokenEndpoint, r.URL)
}
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
}
}
func testServicePrincipalTokenRefreshSetsBody(t *testing.T, spt *ServicePrincipalToken, f func(*testing.T, []byte)) {
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("adal: Failed to read body of Service Principal token request (%v)", err)
}
f(t, b)
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
}
}
func TestServicePrincipalTokenManualRefreshSetsBody(t *testing.T) {
sptManual := newServicePrincipalTokenManual()
testServicePrincipalTokenRefreshSetsBody(t, sptManual, func(t *testing.T, b []byte) {
if string(b) != defaultManualFormData {
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v",
defaultManualFormData, string(b))
}
})
}
func TestServicePrincipalTokenCertficateRefreshSetsBody(t *testing.T) {
sptCert := newServicePrincipalTokenCertificate(t)
testServicePrincipalTokenRefreshSetsBody(t, sptCert, func(t *testing.T, b []byte) {
body := string(b)
values, _ := url.ParseQuery(body)
if values["client_assertion_type"][0] != "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" ||
values["client_id"][0] != "id" ||
values["grant_type"][0] != "client_credentials" ||
values["resource"][0] != "resource" {
t.Fatalf("adal: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.")
}
})
}
func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) {
spt := newServicePrincipalToken()
testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) {
if string(b) != defaultFormData {
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v",
defaultFormData, string(b))
}
})
}
func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) {
spt := newServicePrincipalToken()
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
}
if resp.Body.(*mocks.Body).IsOpen() {
t.Fatal("adal: ServicePrincipalToken#Refresh failed to close the HTTP Response Body")
}
}
func TestServicePrincipalTokenRefreshRejectsResponsesWithStatusNotOK(t *testing.T) {
spt := newServicePrincipalToken()
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusUnauthorized, "Unauthorized")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err == nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh should reject a response with status != %d", http.StatusOK)
}
}
func TestServicePrincipalTokenRefreshRejectsEmptyBody(t *testing.T) {
spt := newServicePrincipalToken()
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return mocks.NewResponse(), nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err == nil {
t.Fatal("adal: ServicePrincipalToken#Refresh should reject an empty token")
}
}
func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) {
spt := newServicePrincipalToken()
c := mocks.NewSender()
c.SetError(fmt.Errorf("Faux Error"))
spt.SetSender(c)
err := spt.Refresh()
if err == nil {
t.Fatal("adal: Failed to propagate the request error")
}
}
func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) {
spt := newServicePrincipalToken()
c := mocks.NewSender()
c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", http.StatusUnauthorized))
spt.SetSender(c)
err := spt.Refresh()
if err == nil {
t.Fatalf("adal: Failed to return an when receiving a status code other than HTTP %d", http.StatusOK)
}
}
func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) {
spt := newServicePrincipalToken()
expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds()))
j := newTokenJSON(expiresOn, "resource")
resp := mocks.NewResponseWithContent(j)
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
} else if spt.AccessToken != "accessToken" ||
spt.ExpiresIn != "3600" ||
spt.ExpiresOn != expiresOn ||
spt.NotBefore != expiresOn ||
spt.Resource != "resource" ||
spt.Type != "Bearer" {
t.Fatalf("adal: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v",
j, *spt)
}
}
func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) {
spt := newServicePrincipalToken()
expireToken(&spt.Token)
body := mocks.NewBody(newTokenJSON("test", "test"))
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
f := false
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
f = true
return resp, nil
})
}
})())
spt.SetSender(s)
err := spt.EnsureFresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err)
}
if !f {
t.Fatal("adal: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token")
}
}
func TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) {
spt := newServicePrincipalToken()
setTokenToExpireIn(&spt.Token, 1000*time.Second)
f := false
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
f = true
return mocks.NewResponse(), nil
})
}
})())
spt.SetSender(s)
err := spt.EnsureFresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err)
}
if f {
t.Fatal("adal: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token")
}
}
func TestRefreshCallback(t *testing.T) {
callbackTriggered := false
spt := newServicePrincipalToken(func(Token) error {
callbackTriggered = true
return nil
})
expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds()))
sender := mocks.NewSender()
j := newTokenJSON(expiresOn, "resource")
sender.AppendResponse(mocks.NewResponseWithContent(j))
spt.SetSender(sender)
err := spt.Refresh()
if err != nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
}
if !callbackTriggered {
t.Fatalf("adal: RefreshCallback failed to trigger call callback")
}
}
func TestRefreshCallbackErrorPropagates(t *testing.T) {
errorText := "this is an error text"
spt := newServicePrincipalToken(func(Token) error {
return fmt.Errorf(errorText)
})
expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds()))
sender := mocks.NewSender()
j := newTokenJSON(expiresOn, "resource")
sender.AppendResponse(mocks.NewResponseWithContent(j))
spt.SetSender(sender)
err := spt.Refresh()
if err == nil || !strings.Contains(err.Error(), errorText) {
t.Fatalf("adal: RefreshCallback failed to propagate error")
}
}
// This demonstrates the danger of a manual token without a refresh token
func TestServicePrincipalTokenManualRefreshFailsWithoutRefresh(t *testing.T) {
spt := newServicePrincipalTokenManual()
spt.RefreshToken = ""
err := spt.Refresh()
if err == nil {
t.Fatalf("adal: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token")
}
}
func TestNewServicePrincipalTokenFromMSI(t *testing.T) {
resource := "https://resource"
cb := func(token Token) error { return nil }
spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb)
if err != nil {
t.Fatalf("Failed to get MSI SPT: %v", err)
}
// check some of the SPT fields
if _, ok := spt.secret.(*ServicePrincipalMSISecret); !ok {
t.Fatal("SPT secret was not of MSI type")
}
if spt.resource != resource {
t.Fatal("SPT came back with incorrect resource")
}
if len(spt.refreshCallbacks) != 1 {
t.Fatal("SPT had incorrect refresh callbacks.")
}
}
func TestGetVMEndpoint(t *testing.T) {
tempSettingsFile, err := ioutil.TempFile("", "ManagedIdentity-Settings")
if err != nil {
t.Fatal("Couldn't write temp settings file")
}
defer os.Remove(tempSettingsFile.Name())
settingsContents := []byte(`{
"url": "http://msiendpoint/"
}`)
if _, err := tempSettingsFile.Write(settingsContents); err != nil {
t.Fatal("Couldn't fill temp settings file")
}
endpoint, err := getMSIVMEndpoint(tempSettingsFile.Name())
if err != nil {
t.Fatal("Coudn't get VM endpoint")
}
if endpoint != "http://msiendpoint/" {
t.Fatal("Didn't get correct endpoint")
}
}
func newToken() *Token {
return &Token{
AccessToken: "ASECRETVALUE",
Resource: "https://azure.microsoft.com/",
Type: "Bearer",
}
}
func newTokenJSON(expiresOn string, resource string) string {
return fmt.Sprintf(`{
"access_token" : "accessToken",
"expires_in" : "3600",
"expires_on" : "%s",
"not_before" : "%s",
"resource" : "%s",
"token_type" : "Bearer"
}`,
expiresOn, expiresOn, resource)
}
func newTokenExpiresIn(expireIn time.Duration) *Token {
return setTokenToExpireIn(newToken(), expireIn)
}
func newTokenExpiresAt(expireAt time.Time) *Token {
return setTokenToExpireAt(newToken(), expireAt)
}
func expireToken(t *Token) *Token {
return setTokenToExpireIn(t, 0)
}
func setTokenToExpireAt(t *Token, expireAt time.Time) *Token {
t.ExpiresIn = "3600"
t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(date.UnixEpoch()).Seconds()))
t.NotBefore = t.ExpiresOn
return t
}
func setTokenToExpireIn(t *Token, expireIn time.Duration) *Token {
return setTokenToExpireAt(t, time.Now().Add(expireIn))
}
func newServicePrincipalToken(callbacks ...TokenRefreshCallback) *ServicePrincipalToken {
spt, _ := NewServicePrincipalToken(TestOAuthConfig, "id", "secret", "resource", callbacks...)
return spt
}
func newServicePrincipalTokenManual() *ServicePrincipalToken {
token := newToken()
token.RefreshToken = "refreshtoken"
spt, _ := NewServicePrincipalTokenFromManualToken(TestOAuthConfig, "id", "resource", *token)
return spt
}
func newServicePrincipalTokenCertificate(t *testing.T) *ServicePrincipalToken {
template := x509.Certificate{
SerialNumber: big.NewInt(0),
Subject: pkix.Name{CommonName: "test"},
BasicConstraintsValid: true,
}
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
if err != nil {
t.Fatal(err)
}
certificate, err := x509.ParseCertificate(certificateBytes)
if err != nil {
t.Fatal(err)
}
spt, _ := NewServicePrincipalTokenFromCertificate(TestOAuthConfig, "id", certificate, privateKey, "resource")
return spt
}

View file

@@ -0,0 +1,181 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/http"
"net/url"
"strings"
"github.com/Azure/go-autorest/autorest/adal"
)
const (
bearerChallengeHeader = "Www-Authenticate"
bearer = "Bearer"
tenantID = "tenantID"
)
// Authorizer is the interface that provides a PrepareDecorator used to supply request
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
// state of the formed HTTP request.
type Authorizer interface {
WithAuthorization() PrepareDecorator
}
// NullAuthorizer implements a default, "do nothing" Authorizer.
type NullAuthorizer struct{}
// WithAuthorization returns a PrepareDecorator that does nothing.
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
return WithNothing()
}
// BearerAuthorizer implements the bearer authorization
type BearerAuthorizer struct {
tokenProvider adal.OAuthTokenProvider
}
// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
return &BearerAuthorizer{tokenProvider: tp}
}
func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator {
return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the token.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
refresher, ok := ba.tokenProvider.(adal.Refresher)
if ok {
err := refresher.EnsureFresh()
if err != nil {
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil,
"Failed to refresh the Token for request to %s", r.URL)
}
}
return (ba.withBearerAuthorization()(p)).Prepare(r)
})
}
}
// BearerAuthorizerCallbackFunc is the authentication callback signature.
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
// BearerAuthorizerCallback implements bearer authorization via a callback.
type BearerAuthorizerCallback struct {
sender Sender
callback BearerAuthorizerCallbackFunc
}
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
// is invoked when the HTTP request is submitted.
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
if sender == nil {
sender = &http.Client{}
}
return &BearerAuthorizerCallback{sender: sender, callback: callback}
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
// make a shallow copy of the request and remove the body, since it is not
// required here and this avoids having to copy its contents.
rCopy := *r
removeRequestBody(&rCopy)
resp, err := bacb.sender.Do(&rCopy)
if err == nil && resp.StatusCode == 401 {
defer resp.Body.Close()
if hasBearerChallenge(resp) {
bc, err := newBearerChallenge(resp)
if err != nil {
return r, err
}
if bacb.callback != nil {
ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
if err != nil {
return r, err
}
return ba.WithAuthorization()(p).Prepare(r)
}
}
}
return r, err
})
}
}
// returns true if the HTTP response contains a bearer challenge
func hasBearerChallenge(resp *http.Response) bool {
authHeader := resp.Header.Get(bearerChallengeHeader)
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
return false
}
return true
}
type bearerChallenge struct {
values map[string]string
}
func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
trimmedChallenge := challenge[len(bearer)+1:]
// challenge is a set of key=value pairs that are comma delimited
pairs := strings.Split(trimmedChallenge, ",")
if len(pairs) < 1 {
err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
return bc, err
}
bc.values = make(map[string]string)
for i := range pairs {
trimmedPair := strings.TrimSpace(pairs[i])
pair := strings.Split(trimmedPair, "=")
if len(pair) == 2 {
// remove the enclosing quotes
key := strings.Trim(pair[0], "\"")
value := strings.Trim(pair[1], "\"")
switch key {
case "authorization", "authorization_uri":
// strip the tenant ID from the authorization URL
asURL, err := url.Parse(value)
if err != nil {
return bc, err
}
bc.values[tenantID] = asURL.Path[1:]
default:
bc.values[key] = value
}
}
}
return bc, err
}
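A short hedged sketch of how the authorizer is typically consumed: wrap a ServicePrincipalToken from the adal package and let Prepare (defined elsewhere in this package) apply the decorator, which refreshes the token when needed and sets the Authorization header.

// exampleAuthorizedRequest is illustrative only; the caller supplies an
// already-configured *adal.ServicePrincipalToken.
func exampleAuthorizedRequest(spt *adal.ServicePrincipalToken) (*http.Request, error) {
    ba := NewBearerAuthorizer(spt)
    return Prepare(&http.Request{}, ba.WithAuthorization())
}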

View file

@@ -0,0 +1,188 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/http"
"reflect"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/mocks"
)
const (
TestTenantID = "TestTenantID"
TestActiveDirectoryEndpoint = "https://login/test.com/"
)
func TestWithAuthorizer(t *testing.T) {
r1 := mocks.NewRequest()
na := &NullAuthorizer{}
r2, err := Prepare(r1,
na.WithAuthorization())
if err != nil {
t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err)
} else if !reflect.DeepEqual(r1, r2) {
t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1)
}
}
func TestTokenWithAuthorization(t *testing.T) {
token := &adal.Token{
AccessToken: "TestToken",
Resource: "https://azure.microsoft.com/",
Type: "Bearer",
}
ba := NewBearerAuthorizer(token)
req, err := Prepare(&http.Request{}, ba.WithAuthorization())
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
} else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", token.AccessToken) {
t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header")
}
}
func TestServicePrincipalTokenWithAuthorizationNoRefresh(t *testing.T) {
oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID)
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
}
spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil)
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
}
spt.SetAutoRefresh(false)
s := mocks.NewSender()
spt.SetSender(s)
ba := NewBearerAuthorizer(spt)
req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization())
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
} else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) {
t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header")
}
}
func TestServicePrincipalTokenWithAuthorizationRefresh(t *testing.T) {
oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID)
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
}
refreshed := false
spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", func(t adal.Token) error {
refreshed = true
return nil
})
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
}
jwt := `{
"access_token" : "accessToken",
"expires_in" : "3600",
"expires_on" : "test",
"not_before" : "test",
"resource" : "test",
"token_type" : "Bearer"
}`
body := mocks.NewBody(jwt)
resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")
c := mocks.NewSender()
s := DecorateSender(c,
(func() SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return resp, nil
})
}
})())
spt.SetSender(s)
ba := NewBearerAuthorizer(spt)
req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization())
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
} else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) {
t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header")
}
if !refreshed {
t.Fatal("azure: BearerAuthorizer#WithAuthorization must refresh the token")
}
}
func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfCannotRefresh(t *testing.T) {
oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID)
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
}
spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil)
if err != nil {
t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err)
}
s := mocks.NewSender()
s.AppendResponse(mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest))
spt.SetSender(s)
ba := NewBearerAuthorizer(spt)
_, err = Prepare(mocks.NewRequest(), ba.WithAuthorization())
if err == nil {
t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to return an error when refresh fails")
}
}
func TestBearerAuthorizerCallback(t *testing.T) {
tenantString := "123-tenantID-456"
resourceString := "https://fake.resource.net"
s := mocks.NewSender()
resp := mocks.NewResponseWithStatus("401 Unauthorized", http.StatusUnauthorized)
mocks.SetResponseHeader(resp, bearerChallengeHeader, bearer+" \"authorization\"=\"https://fake.net/"+tenantString+"\",\"resource\"=\""+resourceString+"\"")
s.AppendResponse(resp)
auth := NewBearerAuthorizerCallback(s, func(tenantID, resource string) (*BearerAuthorizer, error) {
if tenantID != tenantString {
t.Fatal("BearerAuthorizerCallback: bad tenant ID")
}
if resource != resourceString {
t.Fatal("BearerAuthorizerCallback: bad resource")
}
oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, tenantID)
if err != nil {
t.Fatalf("azure: NewOAuthConfig returned an error (%v)", err)
}
spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", resource)
if err != nil {
t.Fatalf("azure: NewServicePrincipalToken returned an error (%v)", err)
}
spt.SetSender(s)
return NewBearerAuthorizer(spt), nil
})
_, err := Prepare(mocks.NewRequest(), auth.WithAuthorization())
if err == nil {
t.Fatal("azure: BearerAuthorizerCallback#WithAuthorization failed to return an error when refresh fails")
}
}

View file

@@ -0,0 +1,129 @@
/*
Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
generated Go code.
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
and Responding. A typical pattern is:
req, err := Prepare(&http.Request{},
token.WithAuthorization())
resp, err := Send(req,
WithLogging(logger),
DoErrorIfStatusCode(http.StatusInternalServerError),
DoCloseIfError(),
DoRetryForAttempts(5, time.Second))
err = Respond(resp,
ByDiscardingBody(),
ByClosing())
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
and then pass the data along, pass the data first and then modify the result, or wrap themselves
around passing the data (such as a logger might do). Decorators run in the order provided. For
example, the following:
req, err := Prepare(&http.Request{},
WithBaseURL("https://microsoft.com/"),
WithPath("a"),
WithPath("b"),
WithPath("c"))
will set the URL to:
https://microsoft.com/a/b/c
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
all bound together by means of input / output channels.
Decorators hold their passed state within a closure (such as the path components in the example
above). Be careful to share Preparers and Responders only in a context where such held state
applies. For example, it may not make sense to share a Preparer that applies a query string from a
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
struct (e.g., ByUnmarshallingJson) is likely incorrect.
Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
correct parsing and formatting.
Errors raised by autorest objects and methods will conform to the autorest.Error interface.
See the included examples for more detail. For details on the suggested use of this package by
generated clients, see the Client described below.
*/
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/http"
"time"
)
const (
// HeaderLocation specifies the HTTP Location header.
HeaderLocation = "Location"
// HeaderRetryAfter specifies the HTTP Retry-After header.
HeaderRetryAfter = "Retry-After"
)
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
// and false otherwise.
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
return containsInt(codes, resp.StatusCode)
}
// GetLocation retrieves the URL from the Location header of the passed response.
func GetLocation(resp *http.Response) string {
return resp.Header.Get(HeaderLocation)
}
// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
// the header is absent or is malformed, it will return the supplied default delay time.Duration.
func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
retry := resp.Header.Get(HeaderRetryAfter)
if retry == "" {
return defaultDelay
}
d, err := time.ParseDuration(retry + "s")
if err != nil {
return defaultDelay
}
return d
}
// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
location := GetLocation(resp)
if location == "" {
return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
}
req, err := Prepare(&http.Request{Cancel: cancel},
AsGet(),
WithBaseURL(location))
if err != nil {
return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
}
return req, nil
}
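// Illustrative sketch (not part of the upstream file): the helpers above composed into a
// simple polling loop. The sender and initial resp are assumed to come from an earlier
// request; only 202 Accepted responses are treated as "keep polling" here.
func examplePollUntilDone(sender Sender, resp *http.Response, defaultDelay time.Duration) (*http.Response, error) {
	var err error
	for err == nil && ResponseHasStatusCode(resp, http.StatusAccepted) {
		var req *http.Request
		req, err = NewPollingRequest(resp, nil)
		if err != nil {
			return resp, err
		}
		// Wait for the service-suggested delay (or the default), then poll again.
		resp, err = SendWithSender(sender, req,
			AfterDelay(GetRetryAfter(resp, defaultDelay)))
	}
	return resp, err
}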

View file

@ -0,0 +1,140 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/http"
"testing"
"github.com/Azure/go-autorest/autorest/mocks"
)
func TestResponseHasStatusCode(t *testing.T) {
codes := []int{http.StatusOK, http.StatusAccepted}
resp := &http.Response{StatusCode: http.StatusAccepted}
if !ResponseHasStatusCode(resp, codes...) {
t.Fatalf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes)
}
}
func TestResponseHasStatusCodeNotPresent(t *testing.T) {
codes := []int{http.StatusOK, http.StatusAccepted}
resp := &http.Response{StatusCode: http.StatusInternalServerError}
if ResponseHasStatusCode(resp, codes...) {
t.Fatalf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes)
}
}
func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) {
resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError)
req, _ := NewPollingRequest(resp, nil)
if req != nil {
t.Fatal("autorest: NewPollingRequest returned an http.Request when the Location header was missing")
}
}
func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
mocks.SetAcceptedHeaders(resp)
resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL)
_, err := NewPollingRequest(resp, nil)
if err == nil {
t.Fatal("autorest: NewPollingRequest failed to return an error when Prepare fails")
}
}
func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
mocks.SetAcceptedHeaders(resp)
resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL)
req, _ := NewPollingRequest(resp, nil)
if req != nil {
t.Fatal("autorest: NewPollingRequest returned an http.Request when Prepare failed")
}
}
func TestNewPollingRequestReturnsAGetRequest(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
mocks.SetAcceptedHeaders(resp)
req, _ := NewPollingRequest(resp, nil)
if req.Method != "GET" {
t.Fatalf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method)
}
}
func TestNewPollingRequestProvidesTheURL(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
mocks.SetAcceptedHeaders(resp)
req, _ := NewPollingRequest(resp, nil)
if req.URL.String() != mocks.TestURL {
t.Fatalf("autorest: NewPollingRequest did not create an HTTP with the expected URL -- received %v, expected %v", req.URL, mocks.TestURL)
}
}
func TestGetLocation(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
mocks.SetAcceptedHeaders(resp)
l := GetLocation(resp)
if len(l) == 0 {
t.Fatalf("autorest: GetLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l)
}
}
func TestGetLocationReturnsEmptyStringForMissingLocation(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
l := GetLocation(resp)
if len(l) != 0 {
t.Fatalf("autorest: GetLocation return a value without a Location header -- received %v", l)
}
}
func TestGetRetryAfter(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
mocks.SetAcceptedHeaders(resp)
d := GetRetryAfter(resp, DefaultPollingDelay)
if d != mocks.TestDelay {
t.Fatalf("autorest: GetRetryAfter failed to returned the expected delay -- expected %v, received %v", mocks.TestDelay, d)
}
}
func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
d := GetRetryAfter(resp, DefaultPollingDelay)
if d != DefaultPollingDelay {
t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a missing Retry-After header -- expected %v, received %v",
DefaultPollingDelay, d)
}
}
func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) {
resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
mocks.SetAcceptedHeaders(resp)
resp.Header.Set(http.CanonicalHeaderKey(HeaderRetryAfter), "a very bad non-integer value")
d := GetRetryAfter(resp, DefaultPollingDelay)
if d != DefaultPollingDelay {
t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a malformed Retry-After header -- expected %v, received %v",
DefaultPollingDelay, d)
}
}

View file

@ -0,0 +1,316 @@
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/date"
)
const (
headerAsyncOperation = "Azure-AsyncOperation"
)
const (
operationInProgress string = "InProgress"
operationCanceled string = "Canceled"
operationFailed string = "Failed"
operationSucceeded string = "Succeeded"
)
// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
// long-running operation. It will delay between requests for the duration specified in the
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
// closing the optional channel on the http.Request.
func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
return func(s autorest.Sender) autorest.Sender {
return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
resp, err = s.Do(r)
if err != nil {
return resp, err
}
pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK}
if !autorest.ResponseHasStatusCode(resp, pollingCodes...) {
return resp, nil
}
ps := pollingState{}
for err == nil {
err = updatePollingState(resp, &ps)
if err != nil {
break
}
if ps.hasTerminated() {
if !ps.hasSucceeded() {
err = ps
}
break
}
r, err = newPollingRequest(resp, ps)
if err != nil {
return resp, err
}
delay = autorest.GetRetryAfter(resp, delay)
resp, err = autorest.SendWithSender(s, r,
autorest.AfterDelay(delay))
}
return resp, err
})
}
}
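// Illustrative sketch (not part of the upstream file): attaching the decorator to a send so
// that an Azure long-running operation is polled to a terminal state. The client and req
// values are assumed to be prepared elsewhere.
func exampleSendAndPoll(client autorest.Client, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		DoPollForAsynchronous(client.PollingDelay))
}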
func getAsyncOperation(resp *http.Response) string {
return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
}
func hasSucceeded(state string) bool {
return state == operationSucceeded
}
func hasTerminated(state string) bool {
switch state {
case operationCanceled, operationFailed, operationSucceeded:
return true
default:
return false
}
}
func hasFailed(state string) bool {
return state == operationFailed
}
type provisioningTracker interface {
state() string
hasSucceeded() bool
hasTerminated() bool
}
type operationResource struct {
// Note:
// The specification states services should return the "id" field. However, some return it as
// "operationId".
ID string `json:"id"`
OperationID string `json:"operationId"`
Name string `json:"name"`
Status string `json:"status"`
Properties map[string]interface{} `json:"properties"`
OperationError ServiceError `json:"error"`
StartTime date.Time `json:"startTime"`
EndTime date.Time `json:"endTime"`
PercentComplete float64 `json:"percentComplete"`
}
func (or operationResource) state() string {
return or.Status
}
func (or operationResource) hasSucceeded() bool {
return hasSucceeded(or.state())
}
func (or operationResource) hasTerminated() bool {
return hasTerminated(or.state())
}
type provisioningProperties struct {
ProvisioningState string `json:"provisioningState"`
}
type provisioningStatus struct {
Properties provisioningProperties `json:"properties,omitempty"`
ProvisioningError ServiceError `json:"error,omitempty"`
}
func (ps provisioningStatus) state() string {
return ps.Properties.ProvisioningState
}
func (ps provisioningStatus) hasSucceeded() bool {
return hasSucceeded(ps.state())
}
func (ps provisioningStatus) hasTerminated() bool {
return hasTerminated(ps.state())
}
func (ps provisioningStatus) hasProvisioningError() bool {
return ps.ProvisioningError != ServiceError{}
}
type pollingResponseFormat string
const (
usesOperationResponse pollingResponseFormat = "OperationResponse"
usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
formatIsUnknown pollingResponseFormat = ""
)
type pollingState struct {
responseFormat pollingResponseFormat
uri string
state string
code string
message string
}
func (ps pollingState) hasSucceeded() bool {
return hasSucceeded(ps.state)
}
func (ps pollingState) hasTerminated() bool {
return hasTerminated(ps.state)
}
func (ps pollingState) hasFailed() bool {
return hasFailed(ps.state)
}
func (ps pollingState) Error() string {
return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
}
// updatePollingState maps the operation status -- retrieved from either a provisioningState
// field, the status field of an OperationResource, or inferred from the HTTP status code --
// into a well-known state. Since the process begins with the initial request, the state
// always comes either from the provisioningState returned or is inferred from the HTTP
// status code. Subsequent requests will read an Azure OperationResource object if the
// service initially returned the Azure-AsyncOperation header. The responseFormat field notes
// the expected response format.
func updatePollingState(resp *http.Response, ps *pollingState) error {
// Determine the response shape
// -- The first response will always be a provisioningStatus response; only the polling requests,
// depending on the header returned, may be something else.
var pt provisioningTracker
if ps.responseFormat == usesOperationResponse {
pt = &operationResource{}
} else {
pt = &provisioningStatus{}
}
// If this is the first request (that is, the polling response shape is unknown), determine how
// to poll and what to expect
if ps.responseFormat == formatIsUnknown {
req := resp.Request
if req == nil {
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
}
// Prefer the Azure-AsyncOperation header
ps.uri = getAsyncOperation(resp)
if ps.uri != "" {
ps.responseFormat = usesOperationResponse
} else {
ps.responseFormat = usesProvisioningStatus
}
// Else, use the Location header
if ps.uri == "" {
ps.uri = autorest.GetLocation(resp)
}
// Lastly, for requests against an existing resource, use the last request URI
if ps.uri == "" {
m := strings.ToUpper(req.Method)
if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet {
ps.uri = req.URL.String()
}
}
}
// Read and interpret the response (saving the Body in case no polling is necessary)
b := &bytes.Buffer{}
err := autorest.Respond(resp,
autorest.ByCopying(b),
autorest.ByUnmarshallingJSON(pt),
autorest.ByClosing())
resp.Body = ioutil.NopCloser(b)
if err != nil {
return err
}
// Interpret the results
// -- Terminal states apply regardless
// -- Unknown states are per-service in-progress states
// -- Otherwise, infer state from HTTP status code
if pt.hasTerminated() {
ps.state = pt.state()
} else if pt.state() != "" {
ps.state = operationInProgress
} else {
switch resp.StatusCode {
case http.StatusAccepted:
ps.state = operationInProgress
case http.StatusNoContent, http.StatusCreated, http.StatusOK:
ps.state = operationSucceeded
default:
ps.state = operationFailed
}
}
if ps.state == operationInProgress && ps.uri == "" {
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
}
// For failed operation, check for error code and message in
// -- Operation resource
// -- Response
// -- Otherwise, Unknown
if ps.hasFailed() {
if ps.responseFormat == usesOperationResponse {
or := pt.(*operationResource)
ps.code = or.OperationError.Code
ps.message = or.OperationError.Message
} else {
p := pt.(*provisioningStatus)
if p.hasProvisioningError() {
ps.code = p.ProvisioningError.Code
ps.message = p.ProvisioningError.Message
} else {
ps.code = "Unknown"
ps.message = "None"
}
}
}
return nil
}
func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
req := resp.Request
if req == nil {
return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
}
reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel},
autorest.AsGet(),
autorest.WithBaseURL(ps.uri))
if err != nil {
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri)
}
return reqPoll, nil
}

File diff suppressed because it is too large

View file

@ -0,0 +1,200 @@
/*
Package azure provides Azure-specific implementations used with AutoRest.
See the included examples for more detail.
*/
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"github.com/Azure/go-autorest/autorest"
)
const (
// HeaderClientID is the Azure extension header to set a user-specified request ID.
HeaderClientID = "x-ms-client-request-id"
// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
// should be included in the response.
HeaderReturnClientID = "x-ms-return-client-request-id"
// HeaderRequestID is the Azure extension header of the service generated request ID returned
// in the response.
HeaderRequestID = "x-ms-request-id"
)
// ServiceError encapsulates the error response from an Azure service.
type ServiceError struct {
Code string `json:"code"`
Message string `json:"message"`
Details *[]interface{} `json:"details"`
}
func (se ServiceError) Error() string {
if se.Details != nil {
d, err := json.Marshal(*(se.Details))
if err != nil {
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details)
}
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d))
}
return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
}
// RequestError describes an error response returned by Azure service.
type RequestError struct {
autorest.DetailedError
// The error returned by the Azure service.
ServiceError *ServiceError `json:"error"`
// The request id (from the x-ms-request-id-header) of the request.
RequestID string
}
// Error returns a human-friendly error message from service error.
func (e RequestError) Error() string {
return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
e.StatusCode, e.ServiceError)
}
// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
func IsAzureError(e error) bool {
_, ok := e.(*RequestError)
return ok
}
// NewErrorWithError creates a new Error conforming object from the
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
// if resp is nil), message, and original error. message is treated as a format
// string to which the optional args apply.
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
if v, ok := original.(*RequestError); ok {
return *v
}
statusCode := autorest.UndefinedStatusCode
if resp != nil {
statusCode = resp.StatusCode
}
return RequestError{
DetailedError: autorest.DetailedError{
Original: original,
PackageType: packageType,
Method: method,
StatusCode: statusCode,
Message: fmt.Sprintf(message, args...),
},
}
}
// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
// header to true such that the UUID accompanies the http.Response.
func WithReturningClientID(uuid string) autorest.PrepareDecorator {
preparer := autorest.CreatePreparer(
WithClientID(uuid),
WithReturnClientID(true))
return func(p autorest.Preparer) autorest.Preparer {
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err != nil {
return r, err
}
return preparer.Prepare(r)
})
}
}
// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
func WithClientID(uuid string) autorest.PrepareDecorator {
return autorest.WithHeader(HeaderClientID, uuid)
}
// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
// x-ms-return-client-request-id whose boolean value indicates if the value of the
// x-ms-client-request-id header should be included in the http.Response.
func WithReturnClientID(b bool) autorest.PrepareDecorator {
return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
}
// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
// http.Request sent to the service (and returned in the http.Response).
func ExtractClientID(resp *http.Response) string {
return autorest.ExtractHeaderValue(HeaderClientID, resp)
}
// ExtractRequestID extracts the Azure server generated request identifier from the
// x-ms-request-id header.
func ExtractRequestID(resp *http.Response) string {
return autorest.ExtractHeaderValue(HeaderRequestID, resp)
}
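// Illustrative sketch (not part of the upstream file): tagging outgoing requests with a
// caller-chosen client request ID and reading back the service-generated request ID. The
// UUID shown is only an example value; client and req are assumed to be prepared elsewhere.
func exampleRequestIDs(client autorest.Client, req *http.Request) (string, error) {
	client.RequestInspector = WithReturningClientID("0F39878C-5F76-4DB8-A25D-61D2C193C3CA")
	resp, err := autorest.SendWithSender(client, req)
	if err != nil {
		return "", err
	}
	return ExtractRequestID(resp), nil
}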
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
// azure.RequestError by reading the response body unless the response HTTP status code
// is among the set passed.
//
// If there is a chance the service may return responses other than the Azure error
// format and the response cannot be parsed into an error, a decoding error will
// be returned containing the response body. In any case, the Responder will
// return an error if the status code is not satisfied.
//
// If this Responder returns an error, the response body will be replaced with
// an in-memory reader, which needs no further closing.
func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
return func(r autorest.Responder) autorest.Responder {
return autorest.ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
var e RequestError
defer resp.Body.Close()
// Copy and replace the Body in case it does not contain an error object.
// This will leave the Body available to the caller.
b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
resp.Body = ioutil.NopCloser(&b)
if decodeErr != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
} else if e.ServiceError == nil {
// Check if error is unwrapped ServiceError
if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" {
e.ServiceError = &ServiceError{
Code: "Unknown",
Message: "Unknown service error",
}
}
}
e.RequestID = ExtractRequestID(resp)
if e.StatusCode == nil {
e.StatusCode = resp.StatusCode
}
err = &e
}
return err
})
}
}
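// Illustrative sketch (not part of the upstream file): responding to an Azure call so that
// any non-2xx response surfaces as a *RequestError carrying the service error details.
func exampleRespondWithAzureError(resp *http.Response) error {
	err := autorest.Respond(resp,
		WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByClosing())
	if azErr, ok := err.(*RequestError); ok {
		// azErr.ServiceError holds the Azure error code and message, if the body parsed.
		_ = azErr.ServiceError
	}
	return err
}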

View file

@ -0,0 +1,513 @@
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"reflect"
"strconv"
"testing"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/mocks"
)
const (
headerAuthorization = "Authorization"
longDelay = 5 * time.Second
retryDelay = 10 * time.Millisecond
testLogPrefix = "azure:"
)
// Use a Client Inspector to set the request identifier.
func ExampleWithClientID() {
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
req, _ := autorest.Prepare(&http.Request{},
autorest.AsGet(),
autorest.WithBaseURL("https://microsoft.com/a/b/c/"))
c := autorest.Client{Sender: mocks.NewSender()}
c.RequestInspector = WithReturningClientID(uuid)
autorest.SendWithSender(c, req)
fmt.Printf("Inspector added the %s header with the value %s\n",
HeaderClientID, req.Header.Get(HeaderClientID))
fmt.Printf("Inspector added the %s header with the value %s\n",
HeaderReturnClientID, req.Header.Get(HeaderReturnClientID))
// Output:
// Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6
// Inspector added the x-ms-return-client-request-id header with the value true
}
func TestWithReturningClientIDReturnsError(t *testing.T) {
var errIn error
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
_, errOut := autorest.Prepare(&http.Request{},
withErrorPrepareDecorator(&errIn),
WithReturningClientID(uuid))
if errOut == nil || errIn != errOut {
t.Fatalf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)",
errIn, errOut)
}
}
func TestWithClientID(t *testing.T) {
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
req, _ := autorest.Prepare(&http.Request{},
WithClientID(uuid))
if req.Header.Get(HeaderClientID) != uuid {
t.Fatalf("azure: WithClientID failed to set %s -- expected %s, received %s",
HeaderClientID, uuid, req.Header.Get(HeaderClientID))
}
}
func TestWithReturnClientID(t *testing.T) {
b := false
req, _ := autorest.Prepare(&http.Request{},
WithReturnClientID(b))
if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) {
t.Fatalf("azure: WithReturnClientID failed to set %s -- expected %s, received %s",
HeaderReturnClientID, strconv.FormatBool(b), req.Header.Get(HeaderReturnClientID))
}
}
func TestExtractClientID(t *testing.T) {
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
resp := mocks.NewResponse()
mocks.SetResponseHeader(resp, HeaderClientID, uuid)
if ExtractClientID(resp) != uuid {
t.Fatalf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s",
HeaderClientID, uuid, ExtractClientID(resp))
}
}
func TestExtractRequestID(t *testing.T) {
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
resp := mocks.NewResponse()
mocks.SetResponseHeader(resp, HeaderRequestID, uuid)
if ExtractRequestID(resp) != uuid {
t.Fatalf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s",
HeaderRequestID, uuid, ExtractRequestID(resp))
}
}
func TestIsAzureError_ReturnsTrueForAzureError(t *testing.T) {
if !IsAzureError(&RequestError{}) {
t.Fatalf("azure: IsAzureError failed to return true for an Azure Service error")
}
}
func TestIsAzureError_ReturnsFalseForNonAzureError(t *testing.T) {
if IsAzureError(fmt.Errorf("An Error")) {
t.Fatalf("azure: IsAzureError return true for an non-Azure Service error")
}
}
func TestNewErrorWithError_UsesResponseStatusCode(t *testing.T) {
e := NewErrorWithError(fmt.Errorf("Error"), "packageType", "method", mocks.NewResponseWithStatus("Forbidden", http.StatusForbidden), "message")
if e.StatusCode != http.StatusForbidden {
t.Fatalf("azure: NewErrorWithError failed to use the Status Code of the passed Response -- expected %v, received %v", http.StatusForbidden, e.StatusCode)
}
}
func TestNewErrorWithError_ReturnsUnwrappedError(t *testing.T) {
e1 := RequestError{}
e1.ServiceError = &ServiceError{Code: "42", Message: "A Message"}
e1.StatusCode = 200
e1.RequestID = "A RequestID"
e2 := NewErrorWithError(&e1, "packageType", "method", nil, "message")
if !reflect.DeepEqual(e1, e2) {
t.Fatalf("azure: NewErrorWithError wrapped an RequestError -- expected %T, received %T", e1, e2)
}
}
func TestNewErrorWithError_WrapsAnError(t *testing.T) {
e1 := fmt.Errorf("Inner Error")
var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message")
if _, ok := e2.(RequestError); !ok {
t.Fatalf("azure: NewErrorWithError failed to wrap a standard error -- received %T", e2)
}
}
func TestWithErrorUnlessStatusCode_NotAnAzureError(t *testing.T) {
body := `<html>
<head>
<title>IIS Error page</title>
</head>
<body>Some non-JSON error page</body>
</html>`
r := mocks.NewResponseWithContent(body)
r.Request = mocks.NewRequest()
r.StatusCode = http.StatusBadRequest
r.Status = http.StatusText(r.StatusCode)
err := autorest.Respond(r,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
azErr, _ := err.(*RequestError)
if azErr != nil {
t.Fatalf("azure: azure.RequestError returned from malformed response: %v", err)
}
// the error body should still be there
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != body {
t.Fatalf("response body is wrong. got=%q exptected=%q", string(b), body)
}
}
func TestWithErrorUnlessStatusCode_FoundAzureErrorWithoutDetails(t *testing.T) {
j := `{
"error": {
"code": "InternalError",
"message": "Azure is having trouble right now."
}
}`
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
r := mocks.NewResponseWithContent(j)
mocks.SetResponseHeader(r, HeaderRequestID, uuid)
r.Request = mocks.NewRequest()
r.StatusCode = http.StatusInternalServerError
r.Status = http.StatusText(r.StatusCode)
err := autorest.Respond(r,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
if err == nil {
t.Fatalf("azure: returned nil error for proper error response")
}
azErr, ok := err.(*RequestError)
if !ok {
t.Fatalf("azure: returned error is not azure.RequestError: %T", err)
}
expected := "autorest/azure: Service returned an error. Status=500 Code=\"InternalError\" Message=\"Azure is having trouble right now.\""
if !reflect.DeepEqual(expected, azErr.Error()) {
t.Fatalf("azure: service error is not unmarshaled properly.\nexpected=%v\ngot=%v", expected, azErr.Error())
}
if expected := http.StatusInternalServerError; azErr.StatusCode != expected {
t.Fatalf("azure: got wrong StatusCode=%d Expected=%d", azErr.StatusCode, expected)
}
if expected := uuid; azErr.RequestID != expected {
t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID)
}
_ = azErr.Error()
// the error body should still be there
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != j {
t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j)
}
}
func TestWithErrorUnlessStatusCode_FoundAzureErrorWithDetails(t *testing.T) {
j := `{
"error": {
"code": "InternalError",
"message": "Azure is having trouble right now.",
"details": [{"code": "conflict1", "message":"error message1"},
{"code": "conflict2", "message":"error message2"}]
}
}`
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
r := mocks.NewResponseWithContent(j)
mocks.SetResponseHeader(r, HeaderRequestID, uuid)
r.Request = mocks.NewRequest()
r.StatusCode = http.StatusInternalServerError
r.Status = http.StatusText(r.StatusCode)
err := autorest.Respond(r,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
if err == nil {
t.Fatalf("azure: returned nil error for proper error response")
}
azErr, ok := err.(*RequestError)
if !ok {
t.Fatalf("azure: returned error is not azure.RequestError: %T", err)
}
if expected := "InternalError"; azErr.ServiceError.Code != expected {
t.Fatalf("azure: wrong error code. expected=%q; got=%q", expected, azErr.ServiceError.Code)
}
if azErr.ServiceError.Message == "" {
t.Fatalf("azure: error message is not unmarshaled properly")
}
b, _ := json.Marshal(*azErr.ServiceError.Details)
if string(b) != `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` {
t.Fatalf("azure: error details is not unmarshaled properly")
}
if expected := http.StatusInternalServerError; azErr.StatusCode != expected {
t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected)
}
if expected := uuid; azErr.RequestID != expected {
t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID)
}
_ = azErr.Error()
// the error body should still be there
defer r.Body.Close()
b, err = ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != j {
t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j)
}
}
func TestWithErrorUnlessStatusCode_NoAzureError(t *testing.T) {
j := `{
"Status":"NotFound"
}`
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
r := mocks.NewResponseWithContent(j)
mocks.SetResponseHeader(r, HeaderRequestID, uuid)
r.Request = mocks.NewRequest()
r.StatusCode = http.StatusInternalServerError
r.Status = http.StatusText(r.StatusCode)
err := autorest.Respond(r,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
if err == nil {
t.Fatalf("azure: returned nil error for proper error response")
}
azErr, ok := err.(*RequestError)
if !ok {
t.Fatalf("azure: returned error is not azure.RequestError: %T", err)
}
expected := &ServiceError{
Code: "Unknown",
Message: "Unknown service error",
}
if !reflect.DeepEqual(expected, azErr.ServiceError) {
t.Fatalf("azure: service error is not unmarshaled properly. expected=%q\ngot=%q", expected, azErr.ServiceError)
}
if expected := http.StatusInternalServerError; azErr.StatusCode != expected {
t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected)
}
if expected := uuid; azErr.RequestID != expected {
t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID)
}
_ = azErr.Error()
// the error body should still be there
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != j {
t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j)
}
}
func TestWithErrorUnlessStatusCode_UnwrappedError(t *testing.T) {
j := `{
"target": null,
"code": "InternalError",
"message": "Azure is having trouble right now.",
"details": [{"code": "conflict1", "message":"error message1"},
{"code": "conflict2", "message":"error message2"}],
"innererror": []
}`
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
r := mocks.NewResponseWithContent(j)
mocks.SetResponseHeader(r, HeaderRequestID, uuid)
r.Request = mocks.NewRequest()
r.StatusCode = http.StatusInternalServerError
r.Status = http.StatusText(r.StatusCode)
err := autorest.Respond(r,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
if err == nil {
t.Fatal("azure: returned nil error for proper error response")
}
azErr, ok := err.(*RequestError)
if !ok {
t.Fatalf("returned error is not azure.RequestError: %T", err)
}
if expected := http.StatusInternalServerError; azErr.StatusCode != expected {
t.Logf("Incorrect StatusCode got: %v want: %d", azErr.StatusCode, expected)
t.Fail()
}
if expected := "Azure is having trouble right now."; azErr.ServiceError.Message != expected {
t.Logf("Incorrect Message\n\tgot: %q\n\twant: %q", azErr.Message, expected)
t.Fail()
}
if expected := uuid; azErr.RequestID != expected {
t.Logf("Incorrect request ID\n\tgot: %q\n\twant: %q", azErr.RequestID, expected)
t.Fail()
}
expectedServiceErrorDetails := `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]`
if azErr.ServiceError == nil {
t.Logf("`ServiceError` was nil when it shouldn't have been.")
t.Fail()
} else if azErr.ServiceError.Details == nil {
t.Logf("`ServiceError.Details` was nil when it should have been %q", expectedServiceErrorDetails)
t.Fail()
} else if details, _ := json.Marshal(*azErr.ServiceError.Details); expectedServiceErrorDetails != string(details) {
t.Logf("Error detaisl was not unmarshaled properly.\n\tgot: %q\n\twant: %q", string(details), expectedServiceErrorDetails)
t.Fail()
}
// the error body should still be there
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Error(err)
}
if string(b) != j {
t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j)
}
}
func TestRequestErrorString_WithError(t *testing.T) {
j := `{
"error": {
"code": "InternalError",
"message": "Conflict",
"details": [{"code": "conflict1", "message":"error message1"}]
}
}`
uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
r := mocks.NewResponseWithContent(j)
mocks.SetResponseHeader(r, HeaderRequestID, uuid)
r.Request = mocks.NewRequest()
r.StatusCode = http.StatusInternalServerError
r.Status = http.StatusText(r.StatusCode)
err := autorest.Respond(r,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
if err == nil {
t.Fatalf("azure: returned nil error for proper error response")
}
azErr, _ := err.(*RequestError)
expected := "autorest/azure: Service returned an error. Status=500 Code=\"InternalError\" Message=\"Conflict\" Details=[{\"code\":\"conflict1\",\"message\":\"error message1\"}]"
if expected != azErr.Error() {
t.Fatalf("azure: send wrong RequestError.\nexpected=%v\ngot=%v", expected, azErr.Error())
}
}
func withErrorPrepareDecorator(e *error) autorest.PrepareDecorator {
return func(p autorest.Preparer) autorest.Preparer {
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
*e = fmt.Errorf("azure: Faux Prepare Error")
return r, *e
})
}
}
func withAsyncResponseDecorator(n int) autorest.SendDecorator {
i := 0
return func(s autorest.Sender) autorest.Sender {
return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
resp, err := s.Do(r)
if err == nil {
if i < n {
resp.StatusCode = http.StatusCreated
resp.Header = http.Header{}
resp.Header.Add(http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestURL)
i++
} else {
resp.StatusCode = http.StatusOK
resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation))
}
}
return resp, err
})
}
}
type mockAuthorizer struct{}
func (ma mockAuthorizer) WithAuthorization() autorest.PrepareDecorator {
return autorest.WithHeader(headerAuthorization, mocks.TestAuthorizationHeader)
}
type mockFailingAuthorizer struct{}
func (mfa mockFailingAuthorizer) WithAuthorization() autorest.PrepareDecorator {
return func(p autorest.Preparer) autorest.Preparer {
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error")
})
}
}
type mockInspector struct {
wasInvoked bool
}
func (mi *mockInspector) WithInspection() autorest.PrepareDecorator {
return func(p autorest.Preparer) autorest.Preparer {
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
mi.wasInvoked = true
return p.Prepare(r)
})
}
}
func (mi *mockInspector) ByInspecting() autorest.RespondDecorator {
return func(r autorest.Responder) autorest.Responder {
return autorest.ResponderFunc(func(resp *http.Response) error {
mi.wasInvoked = true
return r.Respond(resp)
})
}
}

View file

@ -0,0 +1,144 @@
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"strings"
)
var environments = map[string]Environment{
"AZURECHINACLOUD": ChinaCloud,
"AZUREGERMANCLOUD": GermanCloud,
"AZUREPUBLICCLOUD": PublicCloud,
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
}
// Environment represents a set of endpoints for each of Azure's Clouds.
type Environment struct {
Name string `json:"name"`
ManagementPortalURL string `json:"managementPortalURL"`
PublishSettingsURL string `json:"publishSettingsURL"`
ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
GalleryEndpoint string `json:"galleryEndpoint"`
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
GraphEndpoint string `json:"graphEndpoint"`
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
}
var (
// PublicCloud is the default public Azure cloud environment
PublicCloud = Environment{
Name: "AzurePublicCloud",
ManagementPortalURL: "https://manage.windowsazure.com/",
PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.windows.net/",
ResourceManagerEndpoint: "https://management.azure.com/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
GalleryEndpoint: "https://gallery.azure.com/",
KeyVaultEndpoint: "https://vault.azure.net/",
GraphEndpoint: "https://graph.windows.net/",
StorageEndpointSuffix: "core.windows.net",
SQLDatabaseDNSSuffix: "database.windows.net",
TrafficManagerDNSSuffix: "trafficmanager.net",
KeyVaultDNSSuffix: "vault.azure.net",
ServiceBusEndpointSuffix: "servicebus.azure.com",
ServiceManagementVMDNSSuffix: "cloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
ContainerRegistryDNSSuffix: "azurecr.io",
}
// USGovernmentCloud is the cloud environment for the US Government
USGovernmentCloud = Environment{
Name: "AzureUSGovernmentCloud",
ManagementPortalURL: "https://manage.windowsazure.us/",
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
GraphEndpoint: "https://graph.usgovcloudapi.net/",
StorageEndpointSuffix: "core.usgovcloudapi.net",
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
ContainerRegistryDNSSuffix: "azurecr.io",
}
// ChinaCloud is the cloud environment operated in China
ChinaCloud = Environment{
Name: "AzureChinaCloud",
ManagementPortalURL: "https://manage.chinacloudapi.com/",
PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/",
ResourceManagerEndpoint: "https://management.chinacloudapi.cn/",
ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/",
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
KeyVaultEndpoint: "https://vault.azure.cn/",
GraphEndpoint: "https://graph.chinacloudapi.cn/",
StorageEndpointSuffix: "core.chinacloudapi.cn",
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
TrafficManagerDNSSuffix: "trafficmanager.cn",
KeyVaultDNSSuffix: "vault.azure.cn",
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net",
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
ContainerRegistryDNSSuffix: "azurecr.io",
}
// GermanCloud is the cloud environment operated in Germany
GermanCloud = Environment{
Name: "AzureGermanCloud",
ManagementPortalURL: "http://portal.microsoftazure.de/",
PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index",
ServiceManagementEndpoint: "https://management.core.cloudapi.de/",
ResourceManagerEndpoint: "https://management.microsoftazure.de/",
ActiveDirectoryEndpoint: "https://login.microsoftonline.de/",
GalleryEndpoint: "https://gallery.cloudapi.de/",
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
GraphEndpoint: "https://graph.cloudapi.de/",
StorageEndpointSuffix: "core.cloudapi.de",
SQLDatabaseDNSSuffix: "database.cloudapi.de",
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
KeyVaultDNSSuffix: "vault.microsoftazure.de",
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
ContainerRegistryDNSSuffix: "azurecr.io",
}
)
// EnvironmentFromName returns an Environment based on the common name specified
func EnvironmentFromName(name string) (Environment, error) {
name = strings.ToUpper(name)
env, ok := environments[name]
if !ok {
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
}
return env, nil
}
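// Illustrative sketch (not part of the upstream file): resolving an Environment from a
// configured cloud name and reading one of its endpoints.
func exampleResolveEnvironment(name string) (string, error) {
	env, err := EnvironmentFromName(name) // e.g. "AzurePublicCloud"
	if err != nil {
		return "", err
	}
	return env.ResourceManagerEndpoint, nil
}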

View file

@ -0,0 +1,230 @@
package azure
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"testing"
)
func TestEnvironmentFromName(t *testing.T) {
name := "azurechinacloud"
if env, _ := EnvironmentFromName(name); env != ChinaCloud {
t.Errorf("Expected to get ChinaCloud for %q", name)
}
name = "AzureChinaCloud"
if env, _ := EnvironmentFromName(name); env != ChinaCloud {
t.Errorf("Expected to get ChinaCloud for %q", name)
}
name = "azuregermancloud"
if env, _ := EnvironmentFromName(name); env != GermanCloud {
t.Errorf("Expected to get GermanCloud for %q", name)
}
name = "AzureGermanCloud"
if env, _ := EnvironmentFromName(name); env != GermanCloud {
t.Errorf("Expected to get GermanCloud for %q", name)
}
name = "azurepubliccloud"
if env, _ := EnvironmentFromName(name); env != PublicCloud {
t.Errorf("Expected to get PublicCloud for %q", name)
}
name = "AzurePublicCloud"
if env, _ := EnvironmentFromName(name); env != PublicCloud {
t.Errorf("Expected to get PublicCloud for %q", name)
}
name = "azureusgovernmentcloud"
if env, _ := EnvironmentFromName(name); env != USGovernmentCloud {
t.Errorf("Expected to get USGovernmentCloud for %q", name)
}
name = "AzureUSGovernmentCloud"
if env, _ := EnvironmentFromName(name); env != USGovernmentCloud {
t.Errorf("Expected to get USGovernmentCloud for %q", name)
}
name = "thisisnotarealcloudenv"
if _, err := EnvironmentFromName(name); err == nil {
t.Errorf("Expected to get an error for %q", name)
}
}
func TestDeserializeEnvironment(t *testing.T) {
env := `{
"name": "--name--",
"ActiveDirectoryEndpoint": "--active-directory-endpoint--",
"galleryEndpoint": "--gallery-endpoint--",
"graphEndpoint": "--graph-endpoint--",
"keyVaultDNSSuffix": "--key-vault-dns-suffix--",
"keyVaultEndpoint": "--key-vault-endpoint--",
"managementPortalURL": "--management-portal-url--",
"publishSettingsURL": "--publish-settings-url--",
"resourceManagerEndpoint": "--resource-manager-endpoint--",
"serviceBusEndpointSuffix": "--service-bus-endpoint-suffix--",
"serviceManagementEndpoint": "--service-management-endpoint--",
"sqlDatabaseDNSSuffix": "--sql-database-dns-suffix--",
"storageEndpointSuffix": "--storage-endpoint-suffix--",
"trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--",
"serviceManagementVMDNSSuffix": "--asm-vm-dns-suffix--",
"resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--",
"containerRegistryDNSSuffix": "--container-registry-dns-suffix--"
}`
testSubject := Environment{}
err := json.Unmarshal([]byte(env), &testSubject)
if err != nil {
t.Fatalf("failed to unmarshal: %s", err)
}
if "--name--" != testSubject.Name {
t.Errorf("Expected Name to be \"--name--\", but got %q", testSubject.Name)
}
if "--management-portal-url--" != testSubject.ManagementPortalURL {
t.Errorf("Expected ManagementPortalURL to be \"--management-portal-url--\", but got %q", testSubject.ManagementPortalURL)
}
if "--publish-settings-url--" != testSubject.PublishSettingsURL {
t.Errorf("Expected PublishSettingsURL to be \"--publish-settings-url--\", but got %q", testSubject.PublishSettingsURL)
}
if "--service-management-endpoint--" != testSubject.ServiceManagementEndpoint {
t.Errorf("Expected ServiceManagementEndpoint to be \"--service-management-endpoint--\", but got %q", testSubject.ServiceManagementEndpoint)
}
if "--resource-manager-endpoint--" != testSubject.ResourceManagerEndpoint {
t.Errorf("Expected ResourceManagerEndpoint to be \"--resource-manager-endpoint--\", but got %q", testSubject.ResourceManagerEndpoint)
}
if "--active-directory-endpoint--" != testSubject.ActiveDirectoryEndpoint {
t.Errorf("Expected ActiveDirectoryEndpoint to be \"--active-directory-endpoint--\", but got %q", testSubject.ActiveDirectoryEndpoint)
}
if "--gallery-endpoint--" != testSubject.GalleryEndpoint {
t.Errorf("Expected GalleryEndpoint to be \"--gallery-endpoint--\", but got %q", testSubject.GalleryEndpoint)
}
if "--key-vault-endpoint--" != testSubject.KeyVaultEndpoint {
t.Errorf("Expected KeyVaultEndpoint to be \"--key-vault-endpoint--\", but got %q", testSubject.KeyVaultEndpoint)
}
if "--graph-endpoint--" != testSubject.GraphEndpoint {
t.Errorf("Expected GraphEndpoint to be \"--graph-endpoint--\", but got %q", testSubject.GraphEndpoint)
}
if "--storage-endpoint-suffix--" != testSubject.StorageEndpointSuffix {
t.Errorf("Expected StorageEndpointSuffix to be \"--storage-endpoint-suffix--\", but got %q", testSubject.StorageEndpointSuffix)
}
if "--sql-database-dns-suffix--" != testSubject.SQLDatabaseDNSSuffix {
t.Errorf("Expected sql-database-dns-suffix to be \"--sql-database-dns-suffix--\", but got %q", testSubject.SQLDatabaseDNSSuffix)
}
if "--key-vault-dns-suffix--" != testSubject.KeyVaultDNSSuffix {
t.Errorf("Expected StorageEndpointSuffix to be \"--key-vault-dns-suffix--\", but got %q", testSubject.KeyVaultDNSSuffix)
}
if "--service-bus-endpoint-suffix--" != testSubject.ServiceBusEndpointSuffix {
t.Errorf("Expected StorageEndpointSuffix to be \"--service-bus-endpoint-suffix--\", but got %q", testSubject.ServiceBusEndpointSuffix)
}
if "--asm-vm-dns-suffix--" != testSubject.ServiceManagementVMDNSSuffix {
t.Errorf("Expected ServiceManagementVMDNSSuffix to be \"--asm-vm-dns-suffix--\", but got %q", testSubject.ServiceManagementVMDNSSuffix)
}
if "--arm-vm-dns-suffix--" != testSubject.ResourceManagerVMDNSSuffix {
t.Errorf("Expected ResourceManagerVMDNSSuffix to be \"--arm-vm-dns-suffix--\", but got %q", testSubject.ResourceManagerVMDNSSuffix)
}
if "--container-registry-dns-suffix--" != testSubject.ContainerRegistryDNSSuffix {
t.Errorf("Expected ContainerRegistryDNSSuffix to be \"--container-registry-dns-suffix--\", but got %q", testSubject.ContainerRegistryDNSSuffix)
}
}
func TestRoundTripSerialization(t *testing.T) {
env := Environment{
Name: "--unit-test--",
ManagementPortalURL: "--management-portal-url",
PublishSettingsURL: "--publish-settings-url--",
ServiceManagementEndpoint: "--service-management-endpoint--",
ResourceManagerEndpoint: "--resource-management-endpoint--",
ActiveDirectoryEndpoint: "--active-directory-endpoint--",
GalleryEndpoint: "--gallery-endpoint--",
KeyVaultEndpoint: "--key-vault--endpoint--",
GraphEndpoint: "--graph-endpoint--",
StorageEndpointSuffix: "--storage-endpoint-suffix--",
SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--",
TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--",
KeyVaultDNSSuffix: "--key-vault-dns-suffix--",
ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--",
ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--",
ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--",
ContainerRegistryDNSSuffix: "--container-registry-dns-suffix--",
}
bytes, err := json.Marshal(env)
if err != nil {
t.Fatalf("failed to marshal: %s", err)
}
testSubject := Environment{}
err = json.Unmarshal(bytes, &testSubject)
if err != nil {
t.Fatalf("failed to unmarshal: %s", err)
}
if env.Name != testSubject.Name {
t.Errorf("Expected Name to be %q, but got %q", env.Name, testSubject.Name)
}
if env.ManagementPortalURL != testSubject.ManagementPortalURL {
t.Errorf("Expected ManagementPortalURL to be %q, but got %q", env.ManagementPortalURL, testSubject.ManagementPortalURL)
}
if env.PublishSettingsURL != testSubject.PublishSettingsURL {
t.Errorf("Expected PublishSettingsURL to be %q, but got %q", env.PublishSettingsURL, testSubject.PublishSettingsURL)
}
if env.ServiceManagementEndpoint != testSubject.ServiceManagementEndpoint {
t.Errorf("Expected ServiceManagementEndpoint to be %q, but got %q", env.ServiceManagementEndpoint, testSubject.ServiceManagementEndpoint)
}
if env.ResourceManagerEndpoint != testSubject.ResourceManagerEndpoint {
t.Errorf("Expected ResourceManagerEndpoint to be %q, but got %q", env.ResourceManagerEndpoint, testSubject.ResourceManagerEndpoint)
}
if env.ActiveDirectoryEndpoint != testSubject.ActiveDirectoryEndpoint {
t.Errorf("Expected ActiveDirectoryEndpoint to be %q, but got %q", env.ActiveDirectoryEndpoint, testSubject.ActiveDirectoryEndpoint)
}
if env.GalleryEndpoint != testSubject.GalleryEndpoint {
t.Errorf("Expected GalleryEndpoint to be %q, but got %q", env.GalleryEndpoint, testSubject.GalleryEndpoint)
}
if env.KeyVaultEndpoint != testSubject.KeyVaultEndpoint {
t.Errorf("Expected KeyVaultEndpoint to be %q, but got %q", env.KeyVaultEndpoint, testSubject.KeyVaultEndpoint)
}
if env.GraphEndpoint != testSubject.GraphEndpoint {
t.Errorf("Expected GraphEndpoint to be %q, but got %q", env.GraphEndpoint, testSubject.GraphEndpoint)
}
if env.StorageEndpointSuffix != testSubject.StorageEndpointSuffix {
t.Errorf("Expected StorageEndpointSuffix to be %q, but got %q", env.StorageEndpointSuffix, testSubject.StorageEndpointSuffix)
}
if env.SQLDatabaseDNSSuffix != testSubject.SQLDatabaseDNSSuffix {
t.Errorf("Expected SQLDatabaseDNSSuffix to be %q, but got %q", env.SQLDatabaseDNSSuffix, testSubject.SQLDatabaseDNSSuffix)
}
if env.TrafficManagerDNSSuffix != testSubject.TrafficManagerDNSSuffix {
t.Errorf("Expected TrafficManagerDNSSuffix to be %q, but got %q", env.TrafficManagerDNSSuffix, testSubject.TrafficManagerDNSSuffix)
}
if env.KeyVaultDNSSuffix != testSubject.KeyVaultDNSSuffix {
t.Errorf("Expected KeyVaultDNSSuffix to be %q, but got %q", env.KeyVaultDNSSuffix, testSubject.KeyVaultDNSSuffix)
}
if env.ServiceBusEndpointSuffix != testSubject.ServiceBusEndpointSuffix {
t.Errorf("Expected ServiceBusEndpointSuffix to be %q, but got %q", env.ServiceBusEndpointSuffix, testSubject.ServiceBusEndpointSuffix)
}
if env.ServiceManagementVMDNSSuffix != testSubject.ServiceManagementVMDNSSuffix {
t.Errorf("Expected ServiceManagementVMDNSSuffix to be %q, but got %q", env.ServiceManagementVMDNSSuffix, testSubject.ServiceManagementVMDNSSuffix)
}
if env.ResourceManagerVMDNSSuffix != testSubject.ResourceManagerVMDNSSuffix {
t.Errorf("Expected ResourceManagerVMDNSSuffix to be %q, but got %q", env.ResourceManagerVMDNSSuffix, testSubject.ResourceManagerVMDNSSuffix)
}
if env.ContainerRegistryDNSSuffix != testSubject.ContainerRegistryDNSSuffix {
t.Errorf("Expected ContainerRegistryDNSSuffix to be %q, but got %q", env.ContainerRegistryDNSSuffix, testSubject.ContainerRegistryDNSSuffix)
}
}

View file

@ -0,0 +1,202 @@
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azure
import (
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
)
// DoRetryWithRegistration tries to register the resource provider in case it is unregistered.
// It also handles request retries.
func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
return func(s autorest.Sender) autorest.Sender {
return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
rr := autorest.NewRetriableRequest(r)
for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
err = rr.Prepare()
if err != nil {
return resp, err
}
resp, err = autorest.SendWithSender(s, rr.Request(),
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
)
if err != nil {
return resp, err
}
if resp.StatusCode != http.StatusConflict {
return resp, err
}
var re RequestError
err = autorest.Respond(
resp,
autorest.ByUnmarshallingJSON(&re),
)
if err != nil {
return resp, err
}
if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
err = register(client, r, re)
if err != nil {
return resp, fmt.Errorf("failed auto registering Resource Provider: %s", err)
}
}
}
return resp, errors.New("failed request and resource provider registration")
})
}
}
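// Illustrative sketch (not part of the upstream file): wiring the automatic resource
// provider registration into a send. The client and req values are assumed to be
// prepared elsewhere.
func exampleSendWithRegistration(client autorest.Client, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		DoRetryWithRegistration(client))
}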
func getProvider(re RequestError) (string, error) {
if re.ServiceError != nil {
if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 {
detail := (*re.ServiceError.Details)[0].(map[string]interface{})
return detail["target"].(string), nil
}
}
return "", errors.New("provider was not found in the response")
}
func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
subID := getSubscription(originalReq.URL.Path)
if subID == "" {
return errors.New("missing parameter subscriptionID to register resource provider")
}
providerName, err := getProvider(re)
if err != nil {
return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
}
newURL := url.URL{
Scheme: originalReq.URL.Scheme,
Host: originalReq.URL.Host,
}
// taken from the resources SDK
// with almost identical code, these sections are easier to maintain
// It is also not a good idea to import the SDK here
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
pathParameters := map[string]interface{}{
"resourceProviderNamespace": autorest.Encode("path", providerName),
"subscriptionId": autorest.Encode("path", subID),
}
const APIVersion = "2016-09-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(newURL.String()),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
autorest.WithQueryParameters(queryParameters),
)
req, err := preparer.Prepare(&http.Request{})
if err != nil {
return err
}
req.Cancel = originalReq.Cancel
resp, err := autorest.SendWithSender(client, req,
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
)
if err != nil {
return err
}
type Provider struct {
RegistrationState *string `json:"registrationState,omitempty"`
}
var provider Provider
err = autorest.Respond(
resp,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&provider),
autorest.ByClosing(),
)
if err != nil {
return err
}
// poll for registered provisioning state
now := time.Now()
for err == nil && time.Since(now) < client.PollingDuration {
// taken from the resources SDK
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(newURL.String()),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
autorest.WithQueryParameters(queryParameters),
)
req, err = preparer.Prepare(&http.Request{})
if err != nil {
return err
}
req.Cancel = originalReq.Cancel
resp, err := autorest.SendWithSender(client.Sender, req,
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
)
if err != nil {
return err
}
err = autorest.Respond(
resp,
WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&provider),
autorest.ByClosing(),
)
if err != nil {
return err
}
if provider.RegistrationState != nil &&
*provider.RegistrationState == "Registered" {
break
}
delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel)
if !delayed {
autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel)
}
}
if !(time.Since(now) < client.PollingDuration) {
return errors.New("polling for resource provider registration has exceeded the polling duration")
}
return err
}
func getSubscription(path string) string {
parts := strings.Split(path, "/")
for i, v := range parts {
if v == "subscriptions" && (i+1) < len(parts) {
return parts[i+1]
}
}
return ""
}
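
A minimal usage sketch (not part of the vendored file; the management URL, subscription ID, and user agent are placeholders): an ARM request is sent through SendWithSender with DoRetryWithRegistration wrapping the configured client, so a MissingSubscriptionRegistration conflict triggers provider registration before the original request is retried.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Placeholder client; a real caller would also configure an Authorizer.
	client := autorest.NewClientWithUserAgent("example-agent")

	// Placeholder ARM URL; register() extracts the subscription ID from the request path.
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups?api-version=2016-09-01"))
	if err != nil {
		panic(err)
	}

	// The decorator retries transient status codes and, on a
	// MissingSubscriptionRegistration conflict, registers the provider first.
	resp, err := autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client))
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)
}
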

@@ -0,0 +1,81 @@
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package azure
import (
"net/http"
"testing"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/mocks"
)
func TestDoRetryWithRegistration(t *testing.T) {
client := mocks.NewSender()
// first response, should retry because it is a transient error
client.AppendResponse(mocks.NewResponseWithStatus("Internal server error", http.StatusInternalServerError))
// response indicates the resource provider has not been registered
client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
"error":{
"code":"MissingSubscriptionRegistration",
"message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions.",
"details":[
{
"code":"MissingSubscriptionRegistration",
"target":"Microsoft.EventGrid",
"message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions."
}
]
}
}
`), http.StatusConflict, "MissingSubscriptionRegistration"))
// first poll response, still not ready
client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
"registrationState": "Registering"
}
`), http.StatusOK, "200 OK"))
// last poll response, resource provider has been registered
client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
"registrationState": "Registered"
}
`), http.StatusOK, "200 OK"))
// retry original request, response is successful
client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK))
req := mocks.NewRequestForURL("https://lol/subscriptions/rofl")
req.Body = mocks.NewBody("lolol")
r, err := autorest.SendWithSender(client, req,
DoRetryWithRegistration(autorest.Client{
PollingDelay: time.Second,
PollingDuration: time.Second * 10,
RetryAttempts: 5,
RetryDuration: time.Second,
Sender: client,
}),
)
if err != nil {
t.Fatalf("got error: %v", err)
}
autorest.Respond(r,
autorest.ByDiscardingBody(),
autorest.ByClosing(),
)
if r.StatusCode != http.StatusOK {
t.Fatalf("azure: Sender#DoRetryWithRegistration -- Got: StatusCode %v; Want: StatusCode 200 OK", r.StatusCode)
}
}

vendor/github.com/Azure/go-autorest/autorest/client.go generated vendored
@@ -0,0 +1,251 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/cookiejar"
"runtime"
"time"
)
const (
// DefaultPollingDelay is a reasonable delay between polling requests.
DefaultPollingDelay = 60 * time.Second
// DefaultPollingDuration is a reasonable total polling duration.
DefaultPollingDuration = 15 * time.Minute
// DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
DefaultRetryAttempts = 3
)
var (
// defaultUserAgent builds a string containing the Go version, system architecture and OS,
// and the go-autorest version.
defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
Version(),
)
// StatusCodesForRetry are a defined group of status codes for which the client will retry
StatusCodesForRetry = []int{
http.StatusRequestTimeout, // 408
http.StatusTooManyRequests, // 429
http.StatusInternalServerError, // 500
http.StatusBadGateway, // 502
http.StatusServiceUnavailable, // 503
http.StatusGatewayTimeout, // 504
}
)
const (
requestFormat = `HTTP Request Begin ===================================================
%s
===================================================== HTTP Request End
`
responseFormat = `HTTP Response Begin ===================================================
%s
===================================================== HTTP Response End
`
)
// Response serves as the base for all responses from generated clients. It provides access to the
// last http.Response.
type Response struct {
*http.Response `json:"-"`
}
// LoggingInspector implements request and response inspectors that log the full request and
// response to a supplied log.
type LoggingInspector struct {
Logger *log.Logger
}
// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
// body is restored after being emitted.
//
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
// important. It is best used to trace JSON or similar body values.
func (li LoggingInspector) WithInspection() PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
var body, b bytes.Buffer
defer r.Body.Close()
r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
if err := r.Write(&b); err != nil {
return nil, fmt.Errorf("Failed to write response: %v", err)
}
li.Logger.Printf(requestFormat, b.String())
r.Body = ioutil.NopCloser(&body)
return p.Prepare(r)
})
}
}
// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
// body is restored after being emitted.
//
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
// important. It is best used to trace JSON or similar body values.
func (li LoggingInspector) ByInspecting() RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
var body, b bytes.Buffer
defer resp.Body.Close()
resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
if err := resp.Write(&b); err != nil {
return fmt.Errorf("Failed to write response: %v", err)
}
li.Logger.Printf(responseFormat, b.String())
resp.Body = ioutil.NopCloser(&body)
return r.Respond(resp)
})
}
}
// Client is the base for autorest generated clients. It provides default, "do nothing"
// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
// standard, undecorated http.Client as a default Sender.
//
// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
// return responses that compose with Response.
//
// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
// sending the request by providing a decorated Sender.
type Client struct {
Authorizer Authorizer
Sender Sender
RequestInspector PrepareDecorator
ResponseInspector RespondDecorator
// PollingDelay sets the polling frequency used in the absence of a Retry-After HTTP header
PollingDelay time.Duration
// PollingDuration sets the maximum polling time after which an error is returned.
PollingDuration time.Duration
// RetryAttempts sets the default number of retry attempts for client.
RetryAttempts int
// RetryDuration sets the delay duration for retries.
RetryDuration time.Duration
// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
// through the Do method.
UserAgent string
Jar http.CookieJar
}
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
// string.
func NewClientWithUserAgent(ua string) Client {
c := Client{
PollingDelay: DefaultPollingDelay,
PollingDuration: DefaultPollingDuration,
RetryAttempts: DefaultRetryAttempts,
RetryDuration: 30 * time.Second,
UserAgent: defaultUserAgent,
}
c.Sender = c.sender()
c.AddToUserAgent(ua)
return c
}
// AddToUserAgent adds an extension to the current user agent
func (c *Client) AddToUserAgent(extension string) error {
if extension != "" {
c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
return nil
}
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
}
// Do implements the Sender interface by invoking the active Sender after applying authorization.
// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
// is set, set the User-Agent header.
func (c Client) Do(r *http.Request) (*http.Response, error) {
if r.UserAgent() == "" {
r, _ = Prepare(r,
WithUserAgent(c.UserAgent))
}
r, err := Prepare(r,
c.WithInspection(),
c.WithAuthorization())
if err != nil {
return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
}
resp, err := SendWithSender(c.sender(), r)
Respond(resp, c.ByInspecting())
return resp, err
}
// sender returns the Sender to which to send requests.
func (c Client) sender() Sender {
if c.Sender == nil {
j, _ := cookiejar.New(nil)
return &http.Client{Jar: j}
}
return c.Sender
}
// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
func (c Client) WithAuthorization() PrepareDecorator {
return c.authorizer().WithAuthorization()
}
// authorizer returns the Authorizer to use.
func (c Client) authorizer() Authorizer {
if c.Authorizer == nil {
return NullAuthorizer{}
}
return c.Authorizer
}
// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
// if present, or returns the WithNothing PrepareDecorator otherwise.
func (c Client) WithInspection() PrepareDecorator {
if c.RequestInspector == nil {
return WithNothing()
}
return c.RequestInspector
}
// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
// if present, or returns the ByIgnoring RespondDecorator otherwise.
func (c Client) ByInspecting() RespondDecorator {
if c.ResponseInspector == nil {
return ByIgnoring()
}
return c.ResponseInspector
}
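
A short, illustrative sketch of how the pieces above compose (the user agent, log prefix, and payload are made up, and a local httptest server stands in for a real endpoint): a client built with NewClientWithUserAgent gets a LoggingInspector wired in as its RequestInspector and ResponseInspector, and Do then applies the User-Agent header, authorization, and inspection before delegating to the sender.

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Local test server so the sketch is self-contained.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"ok":true}`)
	}))
	defer server.Close()

	c := autorest.NewClientWithUserAgent("example-agent") // hypothetical user agent

	// Log full requests and responses; bodies are restored after being emitted.
	li := autorest.LoggingInspector{Logger: log.New(os.Stderr, "autorest: ", 0)}
	c.RequestInspector = li.WithInspection()
	c.ResponseInspector = li.ByInspecting()

	req, err := http.NewRequest(http.MethodPost, server.URL, strings.NewReader(`{"name":"demo"}`))
	if err != nil {
		log.Fatal(err)
	}

	// Do sets the User-Agent header, applies authorization (NullAuthorizer by default),
	// runs the request inspector, sends, then runs the response inspector.
	resp, err := c.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
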

@@ -0,0 +1,402 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
"github.com/Azure/go-autorest/autorest/mocks"
)
func TestLoggingInspectorWithInspection(t *testing.T) {
b := bytes.Buffer{}
c := Client{}
li := LoggingInspector{Logger: log.New(&b, "", 0)}
c.RequestInspector = li.WithInspection()
Prepare(mocks.NewRequestWithContent("Content"),
c.WithInspection())
if len(b.String()) <= 0 {
t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log")
}
}
func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) {
b := bytes.Buffer{}
c := Client{}
r := mocks.NewRequestWithContent("Content")
li := LoggingInspector{Logger: log.New(&b, "", 0)}
c.RequestInspector = li.WithInspection()
if _, err := Prepare(r,
c.WithInspection()); err != nil {
t.Error(err)
}
if len(b.String()) <= 0 {
t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log")
}
}
func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) {
b := bytes.Buffer{}
c := Client{}
r := mocks.NewRequestWithContent("Content")
li := LoggingInspector{Logger: log.New(&b, "", 0)}
c.RequestInspector = li.WithInspection()
Prepare(r,
c.WithInspection())
s, _ := ioutil.ReadAll(r.Body)
if len(s) <= 0 {
t.Fatal("autorest: LoggingInspector#WithInspection did not restore the Request body")
}
}
func TestLoggingInspectorByInspecting(t *testing.T) {
b := bytes.Buffer{}
c := Client{}
li := LoggingInspector{Logger: log.New(&b, "", 0)}
c.ResponseInspector = li.ByInspecting()
Respond(mocks.NewResponseWithContent("Content"),
c.ByInspecting())
if len(b.String()) <= 0 {
t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log")
}
}
func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) {
b := bytes.Buffer{}
c := Client{}
r := mocks.NewResponseWithContent("Content")
li := LoggingInspector{Logger: log.New(&b, "", 0)}
c.ResponseInspector = li.ByInspecting()
if err := Respond(r,
c.ByInspecting()); err != nil {
t.Fatal(err)
}
if len(b.String()) <= 0 {
t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log")
}
}
func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) {
b := bytes.Buffer{}
c := Client{}
r := mocks.NewResponseWithContent("Content")
li := LoggingInspector{Logger: log.New(&b, "", 0)}
c.ResponseInspector = li.ByInspecting()
Respond(r,
c.ByInspecting())
s, _ := ioutil.ReadAll(r.Body)
if len(s) <= 0 {
t.Fatal("autorest: LoggingInspector#ByInspecting did not restore the Response body")
}
}
func TestNewClientWithUserAgent(t *testing.T) {
ua := "UserAgent"
c := NewClientWithUserAgent(ua)
completeUA := fmt.Sprintf("%s %s", defaultUserAgent, ua)
if c.UserAgent != completeUA {
t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s",
completeUA, c.UserAgent)
}
}
func TestAddToUserAgent(t *testing.T) {
ua := "UserAgent"
c := NewClientWithUserAgent(ua)
ext := "extension"
err := c.AddToUserAgent(ext)
if err != nil {
t.Fatalf("autorest: AddToUserAgent returned error -- expected nil, received %s", err)
}
completeUA := fmt.Sprintf("%s %s %s", defaultUserAgent, ua, ext)
if c.UserAgent != completeUA {
t.Fatalf("autorest: AddToUserAgent failed to add an extension to the UserAgent -- expected %s, received %s",
completeUA, c.UserAgent)
}
err = c.AddToUserAgent("")
if err == nil {
t.Fatalf("autorest: AddToUserAgent didn't return error -- expected %s, received nil",
fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent))
}
if c.UserAgent != completeUA {
t.Fatalf("autorest: AddToUserAgent failed to not add an empty extension to the UserAgent -- expected %s, received %s",
completeUA, c.UserAgent)
}
}
func TestClientSenderReturnsHttpClientByDefault(t *testing.T) {
c := Client{}
if fmt.Sprintf("%T", c.sender()) != "*http.Client" {
t.Fatal("autorest: Client#sender failed to return http.Client by default")
}
}
func TestClientSenderReturnsSetSender(t *testing.T) {
c := Client{}
s := mocks.NewSender()
c.Sender = s
if c.sender() != s {
t.Fatal("autorest: Client#sender failed to return set Sender")
}
}
func TestClientDoInvokesSender(t *testing.T) {
c := Client{}
s := mocks.NewSender()
c.Sender = s
c.Do(&http.Request{})
if s.Attempts() != 1 {
t.Fatal("autorest: Client#Do failed to invoke the Sender")
}
}
func TestClientDoSetsUserAgent(t *testing.T) {
ua := "UserAgent"
c := Client{UserAgent: ua}
r := mocks.NewRequest()
s := mocks.NewSender()
c.Sender = s
c.Do(r)
if r.UserAgent() != ua {
t.Fatalf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s",
http.CanonicalHeaderKey(headerUserAgent), r.UserAgent())
}
}
func TestClientDoSetsAuthorization(t *testing.T) {
r := mocks.NewRequest()
s := mocks.NewSender()
c := Client{Authorizer: mockAuthorizer{}, Sender: s}
c.Do(r)
if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 {
t.Fatalf("autorest: Client#Send failed to set Authorization header -- %s=%s",
http.CanonicalHeaderKey(headerAuthorization),
r.Header.Get(http.CanonicalHeaderKey(headerAuthorization)))
}
}
func TestClientDoInvokesRequestInspector(t *testing.T) {
r := mocks.NewRequest()
s := mocks.NewSender()
i := &mockInspector{}
c := Client{RequestInspector: i.WithInspection(), Sender: s}
c.Do(r)
if !i.wasInvoked {
t.Fatal("autorest: Client#Send failed to invoke the RequestInspector")
}
}
func TestClientDoInvokesResponseInspector(t *testing.T) {
r := mocks.NewRequest()
s := mocks.NewSender()
i := &mockInspector{}
c := Client{ResponseInspector: i.ByInspecting(), Sender: s}
c.Do(r)
if !i.wasInvoked {
t.Fatal("autorest: Client#Send failed to invoke the ResponseInspector")
}
}
func TestClientDoReturnsErrorIfPrepareFails(t *testing.T) {
c := Client{}
s := mocks.NewSender()
c.Authorizer = mockFailingAuthorizer{}
c.Sender = s
_, err := c.Do(&http.Request{})
if err == nil {
t.Fatalf("autorest: Client#Do failed to return an error when Prepare failed")
}
}
func TestClientDoDoesNotSendIfPrepareFails(t *testing.T) {
c := Client{}
s := mocks.NewSender()
c.Authorizer = mockFailingAuthorizer{}
c.Sender = s
c.Do(&http.Request{})
if s.Attempts() > 0 {
t.Fatal("autorest: Client#Do failed to invoke the Sender")
}
}
func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) {
c := Client{}
if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" {
t.Fatal("autorest: Client#authorizer failed to return the NullAuthorizer by default")
}
}
func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) {
c := Client{}
c.Authorizer = mockAuthorizer{}
if fmt.Sprintf("%T", c.authorizer()) != "autorest.mockAuthorizer" {
t.Fatal("autorest: Client#authorizer failed to return the set Authorizer")
}
}
func TestClientWithAuthorizer(t *testing.T) {
c := Client{}
c.Authorizer = mockAuthorizer{}
req, _ := Prepare(&http.Request{},
c.WithAuthorization())
if req.Header.Get(headerAuthorization) == "" {
t.Fatal("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer")
}
}
func TestClientWithInspection(t *testing.T) {
c := Client{}
r := &mockInspector{}
c.RequestInspector = r.WithInspection()
Prepare(&http.Request{},
c.WithInspection())
if !r.wasInvoked {
t.Fatal("autorest: Client#WithInspection failed to invoke RequestInspector")
}
}
func TestClientWithInspectionSetsDefault(t *testing.T) {
c := Client{}
r1 := &http.Request{}
r2, _ := Prepare(r1,
c.WithInspection())
if !reflect.DeepEqual(r1, r2) {
t.Fatal("autorest: Client#WithInspection failed to provide a default RequestInspector")
}
}
func TestClientByInspecting(t *testing.T) {
c := Client{}
r := &mockInspector{}
c.ResponseInspector = r.ByInspecting()
Respond(&http.Response{},
c.ByInspecting())
if !r.wasInvoked {
t.Fatal("autorest: Client#ByInspecting failed to invoke ResponseInspector")
}
}
func TestClientByInspectingSetsDefault(t *testing.T) {
c := Client{}
r := &http.Response{}
Respond(r,
c.ByInspecting())
if !reflect.DeepEqual(r, &http.Response{}) {
t.Fatal("autorest: Client#ByInspecting failed to provide a default ResponseInspector")
}
}
func TestCookies(t *testing.T) {
second := "second"
expected := http.Cookie{
Name: "tastes",
Value: "delicious",
}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.SetCookie(w, &expected)
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("autorest: ioutil.ReadAll failed reading request body: %s", err)
}
if string(b) == second {
cookie, err := r.Cookie(expected.Name)
if err != nil {
t.Fatalf("autorest: r.Cookie could not get request cookie: %s", err)
}
if cookie == nil {
t.Fatalf("autorest: got nil cookie, expecting %v", expected)
}
if cookie.Value != expected.Value {
t.Fatalf("autorest: got cookie value '%s', expecting '%s'", cookie.Value, expected.Name)
}
}
}))
defer server.Close()
client := NewClientWithUserAgent("")
_, err := SendWithSender(client, mocks.NewRequestForURL(server.URL))
if err != nil {
t.Fatalf("autorest: first request failed: %s", err)
}
r2, err := http.NewRequest(http.MethodGet, server.URL, mocks.NewBody(second))
if err != nil {
t.Fatalf("autorest: failed creating second request: %s", err)
}
_, err = SendWithSender(client, r2)
if err != nil {
t.Fatalf("autorest: second request failed: %s", err)
}
}
func randomString(n int) string {
const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
s := make([]byte, n)
for i := range s {
s[i] = chars[r.Intn(len(chars))]
}
return string(s)
}

@@ -0,0 +1,96 @@
/*
Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
time.Time types. And both convert to time.Time through a ToTime method.
*/
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"time"
)
const (
fullDate = "2006-01-02"
fullDateJSON = `"2006-01-02"`
dateFormat = "%04d-%02d-%02d"
jsonFormat = `"%04d-%02d-%02d"`
)
// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
// 2006-01-02).
type Date struct {
time.Time
}
// ParseDate creates a new Date from the passed string.
func ParseDate(date string) (d Date, err error) {
return parseDate(date, fullDate)
}
func parseDate(date string, format string) (Date, error) {
d, err := time.Parse(format, date)
return Date{Time: d}, err
}
// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalBinary() ([]byte, error) {
return d.MarshalText()
}
// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalBinary(data []byte) error {
return d.UnmarshalText(data)
}
// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalJSON() (json []byte, err error) {
return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
}
// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalJSON(data []byte) (err error) {
d.Time, err = time.Parse(fullDateJSON, string(data))
return err
}
// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalText() (text []byte, err error) {
return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
}
// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalText(data []byte) (err error) {
d.Time, err = time.Parse(fullDate, string(data))
return err
}
// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
func (d Date) String() string {
return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
}
// ToTime returns a Date as a time.Time
func (d Date) ToTime() time.Time {
return d.Time
}
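
A brief illustrative sketch (the struct and field name are made up): Date carries only the calendar day, so a full RFC3339 timestamp is rejected rather than silently truncated.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

type record struct { // hypothetical wire type
	Day date.Date `json:"day"`
}

func main() {
	var r record

	// A plain full-date parses as expected.
	if err := json.Unmarshal([]byte(`{"day":"2001-02-03"}`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Day) // 2001-02-03

	// A date-time payload is rejected, since the layout is full-date only.
	err := json.Unmarshal([]byte(`{"day":"2001-02-03T04:05:06Z"}`), &r)
	fmt.Println(err != nil) // true
}
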

@@ -0,0 +1,237 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
)
func ExampleParseDate() {
d, err := ParseDate("2001-02-03")
if err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: 2001-02-03
}
func ExampleDate() {
d, err := ParseDate("2001-02-03")
if err != nil {
fmt.Println(err)
}
t, err := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z")
if err != nil {
fmt.Println(err)
}
// Date acts as time.Time when it is the receiver
if d.Before(t) {
fmt.Printf("Before ")
} else {
fmt.Printf("After ")
}
// Convert Date when needing a time.Time
if t.After(d.ToTime()) {
fmt.Printf("After")
} else {
fmt.Printf("Before")
}
// Output: Before After
}
func ExampleDate_MarshalBinary() {
d, err := ParseDate("2001-02-03")
if err != nil {
fmt.Println(err)
}
t, err := d.MarshalBinary()
if err != nil {
fmt.Println(err)
}
fmt.Println(string(t))
// Output: 2001-02-03
}
func ExampleDate_UnmarshalBinary() {
d := Date{}
t := "2001-02-03"
if err := d.UnmarshalBinary([]byte(t)); err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: 2001-02-03
}
func ExampleDate_MarshalJSON() {
d, err := ParseDate("2001-02-03")
if err != nil {
fmt.Println(err)
}
j, err := json.Marshal(d)
if err != nil {
fmt.Println(err)
}
fmt.Println(string(j))
// Output: "2001-02-03"
}
func ExampleDate_UnmarshalJSON() {
var d struct {
Date Date `json:"date"`
}
j := `{"date" : "2001-02-03"}`
if err := json.Unmarshal([]byte(j), &d); err != nil {
fmt.Println(err)
}
fmt.Println(d.Date)
// Output: 2001-02-03
}
func ExampleDate_MarshalText() {
d, err := ParseDate("2001-02-03")
if err != nil {
fmt.Println(err)
}
t, err := d.MarshalText()
if err != nil {
fmt.Println(err)
}
fmt.Println(string(t))
// Output: 2001-02-03
}
func ExampleDate_UnmarshalText() {
d := Date{}
t := "2001-02-03"
if err := d.UnmarshalText([]byte(t)); err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: 2001-02-03
}
func TestDateString(t *testing.T) {
d, err := ParseDate("2001-02-03")
if err != nil {
t.Fatalf("date: String failed (%v)", err)
}
if d.String() != "2001-02-03" {
t.Fatalf("date: String failed (%v)", d.String())
}
}
func TestDateBinaryRoundTrip(t *testing.T) {
d1, err := ParseDate("2001-02-03")
if err != nil {
t.Fatalf("date: ParseDate failed (%v)", err)
}
t1, err := d1.MarshalBinary()
if err != nil {
t.Fatalf("date: MarshalBinary failed (%v)", err)
}
d2 := Date{}
if err = d2.UnmarshalBinary(t1); err != nil {
t.Fatalf("date: UnmarshalBinary failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2)
}
}
func TestDateJSONRoundTrip(t *testing.T) {
type s struct {
Date Date `json:"date"`
}
var err error
d1 := s{}
d1.Date, err = ParseDate("2001-02-03")
if err != nil {
t.Fatalf("date: ParseDate failed (%v)", err)
}
j, err := json.Marshal(d1)
if err != nil {
t.Fatalf("date: MarshalJSON failed (%v)", err)
}
d2 := s{}
if err = json.Unmarshal(j, &d2); err != nil {
t.Fatalf("date: UnmarshalJSON failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2)
}
}
func TestDateTextRoundTrip(t *testing.T) {
d1, err := ParseDate("2001-02-03")
if err != nil {
t.Fatalf("date: ParseDate failed (%v)", err)
}
t1, err := d1.MarshalText()
if err != nil {
t.Fatalf("date: MarshalText failed (%v)", err)
}
d2 := Date{}
if err = d2.UnmarshalText(t1); err != nil {
t.Fatalf("date: UnmarshalText failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2)
}
}
func TestDateToTime(t *testing.T) {
var d Date
d, err := ParseDate("2001-02-03")
if err != nil {
t.Fatalf("date: ParseDate failed (%v)", err)
}
var _ time.Time = d.ToTime()
}
func TestDateUnmarshalJSONReturnsError(t *testing.T) {
var d struct {
Date Date `json:"date"`
}
j := `{"date" : "February 3, 2001"}`
if err := json.Unmarshal([]byte(j), &d); err == nil {
t.Fatal("date: Date failed to return error for malformed JSON date")
}
}
func TestDateUnmarshalTextReturnsError(t *testing.T) {
d := Date{}
txt := "February 3, 2001"
if err := d.UnmarshalText([]byte(txt)); err == nil {
t.Fatal("date: Date failed to return error for malformed Text date")
}
}

@@ -0,0 +1,103 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"regexp"
"time"
)
// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
const (
azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"`
azureUtcFormat = "2006-01-02T15:04:05.999999999"
rfc3339JSON = `"` + time.RFC3339Nano + `"`
rfc3339 = time.RFC3339Nano
tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$`
)
// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
type Time struct {
time.Time
}
// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) MarshalBinary() ([]byte, error) {
return t.Time.MarshalText()
}
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
// (i.e., 2006-01-02T15:04:05Z).
func (t *Time) UnmarshalBinary(data []byte) error {
return t.UnmarshalText(data)
}
// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) MarshalJSON() (json []byte, err error) {
return t.Time.MarshalJSON()
}
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time
// (i.e., 2006-01-02T15:04:05Z).
func (t *Time) UnmarshalJSON(data []byte) (err error) {
timeFormat := azureUtcFormatJSON
match, err := regexp.Match(tzOffsetRegex, data)
if err != nil {
return err
} else if match {
timeFormat = rfc3339JSON
}
t.Time, err = ParseTime(timeFormat, string(data))
return err
}
// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) MarshalText() (text []byte, err error) {
return t.Time.MarshalText()
}
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
// (i.e., 2006-01-02T15:04:05Z).
func (t *Time) UnmarshalText(data []byte) (err error) {
timeFormat := azureUtcFormat
match, err := regexp.Match(tzOffsetRegex, data)
if err != nil {
return err
} else if match {
timeFormat = rfc3339
}
t.Time, err = ParseTime(timeFormat, string(data))
return err
}
// String returns the Time formatted as an RFC3339 date-time string (i.e.,
// 2006-01-02T15:04:05Z).
func (t Time) String() string {
// Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does.
b, err := t.MarshalText()
if err != nil {
return ""
}
return string(b)
}
// ToTime returns a Time as a time.Time
func (t Time) ToTime() time.Time {
return t.Time
}
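
A small illustrative sketch (the struct and field name are made up): because UnmarshalJSON falls back to azureUtcFormatJSON when no timezone suffix is matched, both payloads below decode to the same UTC instant.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

type resource struct { // hypothetical wire type
	Created date.Time `json:"created"`
}

func main() {
	withZone := []byte(`{"created":"2017-10-23T18:05:08Z"}`)
	withoutZone := []byte(`{"created":"2017-10-23T18:05:08"}`) // Azure sometimes omits the 'Z'

	var a, b resource
	if err := json.Unmarshal(withZone, &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(withoutZone, &b); err != nil {
		panic(err)
	}

	// Both parse to the same UTC instant.
	fmt.Println(a.Created.ToTime().Equal(b.Created.ToTime())) // true
}
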

@@ -0,0 +1,277 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
)
func ExampleParseTime() {
d, _ := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
fmt.Println(d)
// Output: 2001-02-03 04:05:06 +0000 UTC
}
func ExampleTime_MarshalBinary() {
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
fmt.Println(err)
}
d := Time{ti}
t, err := d.MarshalBinary()
if err != nil {
fmt.Println(err)
}
fmt.Println(string(t))
// Output: 2001-02-03T04:05:06Z
}
func ExampleTime_UnmarshalBinary() {
d := Time{}
t := "2001-02-03T04:05:06Z"
if err := d.UnmarshalBinary([]byte(t)); err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: 2001-02-03T04:05:06Z
}
func ExampleTime_MarshalJSON() {
d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
fmt.Println(err)
}
j, err := json.Marshal(d)
if err != nil {
fmt.Println(err)
}
fmt.Println(string(j))
// Output: "2001-02-03T04:05:06Z"
}
func ExampleTime_UnmarshalJSON() {
var d struct {
Time Time `json:"datetime"`
}
j := `{"datetime" : "2001-02-03T04:05:06Z"}`
if err := json.Unmarshal([]byte(j), &d); err != nil {
fmt.Println(err)
}
fmt.Println(d.Time)
// Output: 2001-02-03T04:05:06Z
}
func ExampleTime_MarshalText() {
d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
fmt.Println(err)
}
t, err := d.MarshalText()
if err != nil {
fmt.Println(err)
}
fmt.Println(string(t))
// Output: 2001-02-03T04:05:06Z
}
func ExampleTime_UnmarshalText() {
d := Time{}
t := "2001-02-03T04:05:06Z"
if err := d.UnmarshalText([]byte(t)); err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: 2001-02-03T04:05:06Z
}
func TestUnmarshalTextforInvalidDate(t *testing.T) {
d := Time{}
dt := "2001-02-03T04:05:06AAA"
if err := d.UnmarshalText([]byte(dt)); err == nil {
t.Fatalf("date: Time#Unmarshal was expecting error for invalid date")
}
}
func TestUnmarshalJSONforInvalidDate(t *testing.T) {
d := Time{}
dt := `"2001-02-03T04:05:06AAA"`
if err := d.UnmarshalJSON([]byte(dt)); err == nil {
t.Fatalf("date: Time#Unmarshal was expecting error for invalid date")
}
}
func TestTimeString(t *testing.T) {
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
fmt.Println(err)
}
d := Time{ti}
if d.String() != "2001-02-03T04:05:06Z" {
t.Fatalf("date: Time#String failed (%v)", d.String())
}
}
func TestTimeStringReturnsEmptyStringForError(t *testing.T) {
d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)}
if d.String() != "" {
t.Fatalf("date: Time#String failed empty string for an error")
}
}
func TestTimeBinaryRoundTrip(t *testing.T) {
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
t.Fatalf("date: Time#ParseTime failed (%v)", err)
}
d1 := Time{ti}
t1, err := d1.MarshalBinary()
if err != nil {
t.Fatalf("date: Time#MarshalBinary failed (%v)", err)
}
d2 := Time{}
if err = d2.UnmarshalBinary(t1); err != nil {
t.Fatalf("date: Time#UnmarshalBinary failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date:Round-trip Binary failed (%v, %v)", d1, d2)
}
}
func TestTimeJSONRoundTrip(t *testing.T) {
type s struct {
Time Time `json:"datetime"`
}
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
t.Fatalf("date: Time#ParseTime failed (%v)", err)
}
d1 := s{Time: Time{ti}}
j, err := json.Marshal(d1)
if err != nil {
t.Fatalf("date: Time#MarshalJSON failed (%v)", err)
}
d2 := s{}
if err = json.Unmarshal(j, &d2); err != nil {
t.Fatalf("date: Time#UnmarshalJSON failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2)
}
}
func TestTimeTextRoundTrip(t *testing.T) {
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
t.Fatalf("date: Time#ParseTime failed (%v)", err)
}
d1 := Time{Time: ti}
t1, err := d1.MarshalText()
if err != nil {
t.Fatalf("date: Time#MarshalText failed (%v)", err)
}
d2 := Time{}
if err = d2.UnmarshalText(t1); err != nil {
t.Fatalf("date: Time#UnmarshalText failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2)
}
}
func TestTimeToTime(t *testing.T) {
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
d := Time{ti}
if err != nil {
t.Fatalf("date: Time#ParseTime failed (%v)", err)
}
var _ time.Time = d.ToTime()
}
func TestUnmarshalJSONNoOffset(t *testing.T) {
var d struct {
Time Time `json:"datetime"`
}
j := `{"datetime" : "2001-02-03T04:05:06.789"}`
if err := json.Unmarshal([]byte(j), &d); err != nil {
t.Fatalf("date: Time#Unmarshal failed (%v)", err)
}
}
func TestUnmarshalJSONPosOffset(t *testing.T) {
var d struct {
Time Time `json:"datetime"`
}
j := `{"datetime" : "1980-01-02T00:11:35.01+01:00"}`
if err := json.Unmarshal([]byte(j), &d); err != nil {
t.Fatalf("date: Time#Unmarshal failed (%v)", err)
}
}
func TestUnmarshalJSONNegOffset(t *testing.T) {
var d struct {
Time Time `json:"datetime"`
}
j := `{"datetime" : "1492-10-12T10:15:01.789-08:00"}`
if err := json.Unmarshal([]byte(j), &d); err != nil {
t.Fatalf("date: Time#Unmarshal failed (%v)", err)
}
}
func TestUnmarshalTextNoOffset(t *testing.T) {
d := Time{}
t1 := "2001-02-03T04:05:06"
if err := d.UnmarshalText([]byte(t1)); err != nil {
t.Fatalf("date: Time#UnmarshalText failed (%v)", err)
}
}
func TestUnmarshalTextPosOffset(t *testing.T) {
d := Time{}
t1 := "2001-02-03T04:05:06+00:30"
if err := d.UnmarshalText([]byte(t1)); err != nil {
t.Fatalf("date: Time#UnmarshalText failed (%v)", err)
}
}
func TestUnmarshalTextNegOffset(t *testing.T) {
d := Time{}
t1 := "2001-02-03T04:05:06-11:00"
if err := d.UnmarshalText([]byte(t1)); err != nil {
t.Fatalf("date: Time#UnmarshalText failed (%v)", err)
}
}

@@ -0,0 +1,100 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"time"
)
const (
rfc1123JSON = `"` + time.RFC1123 + `"`
rfc1123 = time.RFC1123
)
// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
type TimeRFC1123 struct {
time.Time
}
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) {
t.Time, err = ParseTime(rfc1123JSON, string(data))
if err != nil {
return err
}
return nil
}
// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) MarshalJSON() ([]byte, error) {
if y := t.Year(); y < 0 || y >= 10000 {
return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
}
b := []byte(t.Format(rfc1123JSON))
return b, nil
}
// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) MarshalText() ([]byte, error) {
if y := t.Year(); y < 0 || y >= 10000 {
return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
}
b := []byte(t.Format(rfc1123))
return b, nil
}
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
t.Time, err = ParseTime(rfc1123, string(data))
if err != nil {
return err
}
return nil
}
// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
return t.MarshalText()
}
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
return t.UnmarshalText(data)
}
// ToTime returns a Time as a time.Time
func (t TimeRFC1123) ToTime() time.Time {
return t.Time
}
// String returns the Time formatted as an RFC1123 date-time string (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
func (t TimeRFC1123) String() string {
// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
b, err := t.MarshalText()
if err != nil {
return ""
}
return string(b)
}

@@ -0,0 +1,226 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
)
func ExampleTimeRFC1123() {
d, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST")
if err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: 2006-01-02 15:04:05 +0000 MST
}
func ExampleTimeRFC1123_MarshalBinary() {
ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST")
if err != nil {
fmt.Println(err)
}
d := TimeRFC1123{ti}
b, err := d.MarshalBinary()
if err != nil {
fmt.Println(err)
}
fmt.Println(string(b))
// Output: Mon, 02 Jan 2006 15:04:05 MST
}
func ExampleTimeRFC1123_UnmarshalBinary() {
d := TimeRFC1123{}
t := "Mon, 02 Jan 2006 15:04:05 MST"
if err := d.UnmarshalBinary([]byte(t)); err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: Mon, 02 Jan 2006 15:04:05 MST
}
func ExampleTimeRFC1123_MarshalJSON() {
ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST")
if err != nil {
fmt.Println(err)
}
d := TimeRFC1123{ti}
j, err := json.Marshal(d)
if err != nil {
fmt.Println(err)
}
fmt.Println(string(j))
// Output: "Mon, 02 Jan 2006 15:04:05 MST"
}
func TestTimeRFC1123MarshalJSONInvalid(t *testing.T) {
ti := time.Date(20000, 01, 01, 00, 00, 00, 00, time.UTC)
d := TimeRFC1123{ti}
if _, err := json.Marshal(d); err == nil {
t.Fatalf("date: TimeRFC1123#Marshal failed for invalid date")
}
}
func ExampleTimeRFC1123_UnmarshalJSON() {
var d struct {
Time TimeRFC1123 `json:"datetime"`
}
j := `{"datetime" : "Mon, 02 Jan 2006 15:04:05 MST"}`
if err := json.Unmarshal([]byte(j), &d); err != nil {
fmt.Println(err)
}
fmt.Println(d.Time)
// Output: Mon, 02 Jan 2006 15:04:05 MST
}
func ExampleTimeRFC1123_MarshalText() {
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
fmt.Println(err)
}
d := TimeRFC1123{ti}
t, err := d.MarshalText()
if err != nil {
fmt.Println(err)
}
fmt.Println(string(t))
// Output: Sat, 03 Feb 2001 04:05:06 UTC
}
func ExampleTimeRFC1123_UnmarshalText() {
d := TimeRFC1123{}
t := "Sat, 03 Feb 2001 04:05:06 UTC"
if err := d.UnmarshalText([]byte(t)); err != nil {
fmt.Println(err)
}
fmt.Println(d)
// Output: Sat, 03 Feb 2001 04:05:06 UTC
}
func TestUnmarshalJSONforInvalidDateRfc1123(t *testing.T) {
dt := `"Mon, 02 Jan 2000000 15:05 MST"`
d := TimeRFC1123{}
if err := d.UnmarshalJSON([]byte(dt)); err == nil {
t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date")
}
}
func TestUnmarshalTextforInvalidDateRfc1123(t *testing.T) {
dt := "Mon, 02 Jan 2000000 15:05 MST"
d := TimeRFC1123{}
if err := d.UnmarshalText([]byte(dt)); err == nil {
t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date")
}
}
func TestTimeStringRfc1123(t *testing.T) {
ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST")
if err != nil {
fmt.Println(err)
}
d := TimeRFC1123{ti}
if d.String() != "Mon, 02 Jan 2006 15:04:05 MST" {
t.Fatalf("date: TimeRFC1123#String failed (%v)", d.String())
}
}
func TestTimeStringReturnsEmptyStringForErrorRfc1123(t *testing.T) {
d := TimeRFC1123{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)}
if d.String() != "" {
t.Fatalf("date: TimeRFC1123#String failed empty string for an error")
}
}
func TestTimeBinaryRoundTripRfc1123(t *testing.T) {
ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z")
if err != nil {
t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err)
}
d1 := TimeRFC1123{ti}
t1, err := d1.MarshalBinary()
if err != nil {
t.Fatalf("date: TimeRFC1123#MarshalBinary failed (%v)", err)
}
d2 := TimeRFC1123{}
if err = d2.UnmarshalBinary(t1); err != nil {
t.Fatalf("date: TimeRFC1123#UnmarshalBinary failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2)
}
}
func TestTimeJSONRoundTripRfc1123(t *testing.T) {
type s struct {
Time TimeRFC1123 `json:"datetime"`
}
var err error
ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST")
if err != nil {
t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err)
}
d1 := s{Time: TimeRFC1123{ti}}
j, err := json.Marshal(d1)
if err != nil {
t.Fatalf("date: TimeRFC1123#MarshalJSON failed (%v)", err)
}
d2 := s{}
if err = json.Unmarshal(j, &d2); err != nil {
t.Fatalf("date: TimeRFC1123#UnmarshalJSON failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2)
}
}
func TestTimeTextRoundTripRfc1123(t *testing.T) {
ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST")
if err != nil {
t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err)
}
d1 := TimeRFC1123{Time: ti}
t1, err := d1.MarshalText()
if err != nil {
t.Fatalf("date: TimeRFC1123#MarshalText failed (%v)", err)
}
d2 := TimeRFC1123{}
if err = d2.UnmarshalText(t1); err != nil {
t.Fatalf("date: TimeRFC1123#UnmarshalText failed (%v)", err)
}
if !reflect.DeepEqual(d1, d2) {
t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2)
}
}
func TestTimeToTimeRFC1123(t *testing.T) {
ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST")
d := TimeRFC1123{ti}
if err != nil {
t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err)
}
var _ time.Time = d.ToTime()
}

@@ -0,0 +1,123 @@
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/binary"
"encoding/json"
"time"
)
// unixEpoch is the moment in time that should be treated as timestamp 0.
var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
// UnixTime marshals and unmarshals a time that is represented as the number
// of seconds (ignoring leap seconds) since the Unix Epoch.
type UnixTime time.Time
// Duration returns the time as a Duration since the UnixEpoch.
func (t UnixTime) Duration() time.Duration {
return time.Time(t).Sub(unixEpoch)
}
// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
func NewUnixTimeFromSeconds(seconds float64) UnixTime {
return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
}
// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
return NewUnixTimeFromDuration(time.Duration(nanoseconds))
}
// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
return UnixTime(unixEpoch.Add(dur))
}
// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
func UnixEpoch() time.Time {
return unixEpoch
}
// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
func (t UnixTime) MarshalJSON() ([]byte, error) {
buffer := &bytes.Buffer{}
enc := json.NewEncoder(buffer)
err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
if err != nil {
return nil, err
}
return buffer.Bytes(), nil
}
// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
// midnight January 1st, 1970.
func (t *UnixTime) UnmarshalJSON(text []byte) error {
dec := json.NewDecoder(bytes.NewReader(text))
var secondsSinceEpoch float64
if err := dec.Decode(&secondsSinceEpoch); err != nil {
return err
}
*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
return nil
}
// MarshalText stores the UnixTime as an RFC3339-formatted timestamp, delegating to time.Time.MarshalText.
func (t UnixTime) MarshalText() ([]byte, error) {
cast := time.Time(t)
return cast.MarshalText()
}
// UnmarshalText populates a UnixTime from a value stored textually as an RFC3339 timestamp, delegating to time.Time.UnmarshalText.
func (t *UnixTime) UnmarshalText(raw []byte) error {
var unmarshaled time.Time
if err := unmarshaled.UnmarshalText(raw); err != nil {
return err
}
*t = UnixTime(unmarshaled)
return nil
}
// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
func (t UnixTime) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
payload := int64(t.Duration())
if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalBinary converts a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
func (t *UnixTime) UnmarshalBinary(raw []byte) error {
var nanosecondsSinceEpoch int64
if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
return err
}
*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
return nil
}
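
An illustrative sketch (values chosen arbitrarily) contrasting the three encodings UnixTime supports: JSON is a fractional-seconds number, text is the RFC3339 form inherited from time.Time, and binary is a little-endian int64 of nanoseconds since the epoch.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	t := date.UnixTime(date.UnixEpoch().Add(90*time.Second + 500*time.Millisecond))

	j, _ := json.Marshal(t)     // JSON: seconds since the epoch as a number
	txt, _ := t.MarshalText()   // text: RFC3339 timestamp (delegates to time.Time)
	bin, _ := t.MarshalBinary() // binary: little-endian int64 of nanoseconds

	fmt.Println(string(j))   // 90.5
	fmt.Println(string(txt)) // 1970-01-01T00:01:30.5Z
	fmt.Println(len(bin))    // 8
}
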

@@ -0,0 +1,283 @@
// +build go1.7
package date
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"testing"
"time"
)
func ExampleUnixTime_MarshalJSON() {
epoch := UnixTime(UnixEpoch())
text, _ := json.Marshal(epoch)
fmt.Print(string(text))
// Output: 0
}
func ExampleUnixTime_UnmarshalJSON() {
var myTime UnixTime
json.Unmarshal([]byte("1.3e2"), &myTime)
fmt.Printf("%v", time.Time(myTime))
// Output: 1970-01-01 00:02:10 +0000 UTC
}
func TestUnixTime_MarshalJSON(t *testing.T) {
testCases := []time.Time{
UnixEpoch().Add(-1 * time.Second), // One second before the Unix Epoch
time.Date(2017, time.April, 14, 20, 27, 47, 0, time.UTC), // The time this test was written
UnixEpoch(),
time.Date(1800, 01, 01, 0, 0, 0, 0, time.UTC),
time.Date(2200, 12, 29, 00, 01, 37, 82, time.UTC),
}
for _, tc := range testCases {
t.Run(tc.String(), func(subT *testing.T) {
var actual, expected float64
var marshaled []byte
target := UnixTime(tc)
expected = float64(target.Duration().Nanoseconds()) / 1e9
if temp, err := json.Marshal(target); err == nil {
marshaled = temp
} else {
subT.Error(err)
return
}
dec := json.NewDecoder(bytes.NewReader(marshaled))
if err := dec.Decode(&actual); err != nil {
subT.Error(err)
return
}
diff := math.Abs(actual - expected)
subT.Logf("\ngot :\t%g\nwant:\t%g\ndiff:\t%g", actual, expected, diff)
if diff > 1e-9 { //Must be within 1 nanosecond of one another
subT.Fail()
}
})
}
}
func TestUnixTime_UnmarshalJSON(t *testing.T) {
testCases := []struct {
text string
expected time.Time
}{
{"1", UnixEpoch().Add(time.Second)},
{"0", UnixEpoch()},
{"1492203742", time.Date(2017, time.April, 14, 21, 02, 22, 0, time.UTC)}, // The time this test was written
{"-1", time.Date(1969, time.December, 31, 23, 59, 59, 0, time.UTC)},
{"1.5", UnixEpoch().Add(1500 * time.Millisecond)},
{"0e1", UnixEpoch()}, // See http://json.org for 'number' format definition.
{"1.3e+2", UnixEpoch().Add(130 * time.Second)},
{"1.6E-10", UnixEpoch()}, // This is so small, it should get truncated into the UnixEpoch
{"2E-6", UnixEpoch().Add(2 * time.Microsecond)},
{"1.289345e9", UnixEpoch().Add(1289345000 * time.Second)},
{"1e-9", UnixEpoch().Add(time.Nanosecond)},
}
for _, tc := range testCases {
t.Run(tc.text, func(subT *testing.T) {
var rehydrated UnixTime
if err := json.Unmarshal([]byte(tc.text), &rehydrated); err != nil {
subT.Error(err)
return
}
if time.Time(rehydrated) != tc.expected {
subT.Logf("\ngot: \t%v\nwant:\t%v\ndiff:\t%v", time.Time(rehydrated), tc.expected, time.Time(rehydrated).Sub(tc.expected))
subT.Fail()
}
})
}
}
func TestUnixTime_JSONRoundTrip(t *testing.T) {
testCases := []time.Time{
UnixEpoch(),
time.Date(2005, time.November, 5, 0, 0, 0, 0, time.UTC), // The day V for Vendetta (film) was released.
UnixEpoch().Add(-6 * time.Second),
UnixEpoch().Add(800 * time.Hour),
UnixEpoch().Add(time.Nanosecond),
time.Date(2015, time.September, 05, 4, 30, 12, 9992, time.UTC),
}
for _, tc := range testCases {
t.Run(tc.String(), func(subT *testing.T) {
subject := UnixTime(tc)
var marshaled []byte
if temp, err := json.Marshal(subject); err == nil {
marshaled = temp
} else {
subT.Error(err)
return
}
var unmarshaled UnixTime
if err := json.Unmarshal(marshaled, &unmarshaled); err != nil {
subT.Error(err)
}
actual := time.Time(unmarshaled)
diff := actual.Sub(tc)
subT.Logf("\ngot :\t%s\nwant:\t%s\ndiff:\t%s", actual.String(), tc.String(), diff.String())
if diff > time.Duration(100) { // We lose some precision by working in floats. We shouldn't lose more than 100 nanoseconds.
subT.Fail()
}
})
}
}
func TestUnixTime_MarshalBinary(t *testing.T) {
testCases := []struct {
expected int64
subject time.Time
}{
{0, UnixEpoch()},
{-15 * int64(time.Second), UnixEpoch().Add(-15 * time.Second)},
{54, UnixEpoch().Add(54 * time.Nanosecond)},
}
for _, tc := range testCases {
t.Run("", func(subT *testing.T) {
var marshaled []byte
if temp, err := UnixTime(tc.subject).MarshalBinary(); err == nil {
marshaled = temp
} else {
subT.Error(err)
return
}
var unmarshaled int64
if err := binary.Read(bytes.NewReader(marshaled), binary.LittleEndian, &unmarshaled); err != nil {
subT.Error(err)
return
}
if unmarshaled != tc.expected {
subT.Logf("\ngot: \t%d\nwant:\t%d", unmarshaled, tc.expected)
subT.Fail()
}
})
}
}
func TestUnixTime_BinaryRoundTrip(t *testing.T) {
testCases := []time.Time{
UnixEpoch(),
UnixEpoch().Add(800 * time.Minute),
UnixEpoch().Add(7 * time.Hour),
UnixEpoch().Add(-1 * time.Nanosecond),
}
for _, tc := range testCases {
t.Run(tc.String(), func(subT *testing.T) {
original := UnixTime(tc)
var marshaled []byte
if temp, err := original.MarshalBinary(); err == nil {
marshaled = temp
} else {
subT.Error(err)
return
}
var traveled UnixTime
if err := traveled.UnmarshalBinary(marshaled); err != nil {
subT.Error(err)
return
}
if traveled != original {
subT.Logf("\ngot: \t%s\nwant:\t%s", time.Time(original).String(), time.Time(traveled).String())
subT.Fail()
}
})
}
}
func TestUnixTime_MarshalText(t *testing.T) {
testCases := []time.Time{
UnixEpoch(),
UnixEpoch().Add(45 * time.Second),
UnixEpoch().Add(time.Nanosecond),
UnixEpoch().Add(-100000 * time.Second),
}
for _, tc := range testCases {
expected, _ := tc.MarshalText()
t.Run("", func(subT *testing.T) {
var marshaled []byte
if temp, err := UnixTime(tc).MarshalText(); err == nil {
marshaled = temp
} else {
subT.Error(err)
return
}
if string(marshaled) != string(expected) {
subT.Logf("\ngot: \t%s\nwant:\t%s", string(marshaled), string(expected))
subT.Fail()
}
})
}
}
func TestUnixTime_TextRoundTrip(t *testing.T) {
testCases := []time.Time{
UnixEpoch(),
UnixEpoch().Add(-1 * time.Nanosecond),
UnixEpoch().Add(1 * time.Nanosecond),
time.Date(2017, time.April, 17, 21, 00, 00, 00, time.UTC),
}
for _, tc := range testCases {
t.Run(tc.String(), func(subT *testing.T) {
unixTC := UnixTime(tc)
var marshaled []byte
if temp, err := unixTC.MarshalText(); err == nil {
marshaled = temp
} else {
subT.Error(err)
return
}
var unmarshaled UnixTime
if err := unmarshaled.UnmarshalText(marshaled); err != nil {
subT.Error(err)
return
}
if unmarshaled != unixTC {
subT.Logf("\ngot: \t%s\nwant:\t%s", time.Time(unmarshaled).String(), tc.String())
subT.Fail()
}
})
}
}

Some files were not shown because too many files have changed in this diff.