Replace godep with dep
parent 1e7489927c
commit bf5616c65b

14,883 changed files with 3,937,406 additions and 361,781 deletions
@@ -11,7 +11,7 @@ notifications:
 go:
   - 1.9
 
-go_import_path: k8s.io/ingress
+go_import_path: k8s.io/ingress-nginx
 
 jobs:
   include:
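The hunk above only touches `go_import_path`, but it implies that every in-tree import moves from `k8s.io/ingress` to `k8s.io/ingress-nginx`. A minimal, hypothetical consumer under the new path could look like the sketch below; the `ingress/defaults` package and its `Backend` type are taken from the deleted example later in this diff, while the program itself is not part of the commit:

```go
// Hypothetical snippet, not part of this commit: it only illustrates importing
// the project under its new canonical path k8s.io/ingress-nginx.
package main

import (
	"fmt"

	"k8s.io/ingress-nginx/ingress/defaults"
)

func main() {
	// defaults.Backend is the configuration struct returned by BackendDefaults()
	// in the deleted example below; printing its zero value keeps the sketch runnable.
	fmt.Printf("%+v\n", defaults.Backend{})
}
```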
1429  Godeps/Godeps.json  generated
File diff suppressed because it is too large (the godep manifest is removed by this commit).
351  Gopkg.lock  generated  Normal file
|
@ -0,0 +1,351 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/PuerkitoBio/purell"
|
||||
packages = ["."]
|
||||
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/PuerkitoBio/urlesc"
|
||||
packages = ["."]
|
||||
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/armon/go-proxyproto"
|
||||
packages = ["."]
|
||||
revision = "48572f11356f1843b694f21a290d4f1006bc5e47"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/emicklei/go-restful"
|
||||
packages = [".","log"]
|
||||
revision = "5741799b275a3c4a5a9623a993576d7545cf7b5c"
|
||||
version = "v2.4.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/emicklei/go-restful-swagger12"
|
||||
packages = ["."]
|
||||
revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/fullsailor/pkcs7"
|
||||
packages = ["."]
|
||||
revision = "a009d8d7de53d9503c797cb8ec66fa3b21eed209"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ghodss/yaml"
|
||||
packages = ["."]
|
||||
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/jsonpointer"
|
||||
packages = ["."]
|
||||
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/jsonreference"
|
||||
packages = ["."]
|
||||
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/spec"
|
||||
packages = ["."]
|
||||
revision = "48c2a7185575f9103a5a3863eff950bb776899d2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/swag"
|
||||
packages = ["."]
|
||||
revision = "f3f9494671f93fcff853e3c6e9e948b3eb71e590"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = ["proto","sortkeys"]
|
||||
revision = "100ba4e885062801d56799d78530b73b178a78f3"
|
||||
version = "v0.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/glog"
|
||||
packages = ["."]
|
||||
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/groupcache"
|
||||
packages = ["lru"]
|
||||
revision = "b710c8433bd175204919eb38776e944233235d03"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
|
||||
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/gofuzz"
|
||||
packages = ["."]
|
||||
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/googleapis/gnostic"
|
||||
packages = ["OpenAPIv2","compiler","extensions"]
|
||||
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [".","diskcache"]
|
||||
revision = "c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [".","simplelru"]
|
||||
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/howeyc/gopass"
|
||||
packages = ["."]
|
||||
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/imdario/mergo"
|
||||
packages = ["."]
|
||||
revision = "3e95a51e0639b4cf372f2ccf74c86749d747fbdc"
|
||||
version = "0.2.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/json-iterator/go"
|
||||
packages = ["."]
|
||||
revision = "6ed27152e0428abfde127acb33b08b03a1e67cac"
|
||||
version = "1.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/juju/ratelimit"
|
||||
packages = ["."]
|
||||
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kylelemons/godebug"
|
||||
packages = ["diff","pretty"]
|
||||
revision = "d65d576e9348f5982d7f6d83682b694e731a45c6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mailru/easyjson"
|
||||
packages = ["buffer","jlexer","jwriter"]
|
||||
revision = "2a92e673c9a6302dd05c3a691ae1f24aef46457d"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/go-ps"
|
||||
packages = ["."]
|
||||
revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
revision = "d0303fe809921458f417bcf828397a65db30a7e4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ncabatoff/process-exporter"
|
||||
packages = [".","proc"]
|
||||
revision = "ae9193ff5f4d34dd62dc2fa194453ea73d7e64ee"
|
||||
version = "0.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/paultag/sniff"
|
||||
packages = ["parser"]
|
||||
revision = "87325c3dddf408cfb71f5044873d34ac426d5a59"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pborman/uuid"
|
||||
packages = ["."]
|
||||
revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
|
||||
version = "v1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = ["prometheus","prometheus/promhttp"]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "6f3806018612930941127f2a7c6c453ba2c527d2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
|
||||
revision = "1bab55dd05dbff384524a6a1c99006d9eb5f139b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [".","xfs"]
|
||||
revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/zakjan/cert-chain-resolver"
|
||||
packages = ["certUtil"]
|
||||
revision = "c222fb53d84f5c835aab8027b5d422d4089f9d29"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","http2","http2/hpack","idna","lex/httplex"]
|
||||
revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix","windows"]
|
||||
revision = "ebfc5b4631820b793c9010c87fd8fef0f39eb082"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
|
||||
revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
packages = ["."]
|
||||
revision = "629574ca2a5df945712d3079857300b5e4da0236"
|
||||
version = "v1.4.2"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/go-playground/pool.v3"
|
||||
packages = ["."]
|
||||
revision = "e73cd3a5ded835540c5cf4778488579c5b357d68"
|
||||
version = "v3.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/inf.v0"
|
||||
packages = ["."]
|
||||
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
|
||||
version = "v0.9.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/api"
|
||||
packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"]
|
||||
revision = "81aa34336d28aadc3a8e8da7dfd9258c5157e5e4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/reflect"]
|
||||
revision = "3b05bbfa0a45413bfa184edbf9af617e277962fb"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apiserver"
|
||||
packages = ["pkg/server/healthz"]
|
||||
revision = "c1e53d745d0fe45bf7d5d44697e6eface25fceca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/client-go"
|
||||
packages = ["discovery","discovery/fake","kubernetes","kubernetes/fake","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1alpha1/fake","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta1/fake","kubernetes/typed/apps/v1beta2","kubernetes/typed/apps/v1beta2/fake","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1/fake","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authentication/v1beta1/fake","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1/fake","kubernetes/typed/authorization/v1beta1","kubernetes/typed/authorization/v1beta1/fake","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v1/fake","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/autoscaling/v2beta1/fake","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1/fake","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v1beta1/fake","kubernetes/typed/batch/v2alpha1","kubernetes/typed/batch/v2alpha1/fake","kubernetes/typed/certificates/v1beta1","kubernetes/typed/certificates/v1beta1/fake","kubernetes/typed/core/v1","kubernetes/typed/core/v1/fake","kubernetes/typed/extensions/v1beta1","kubernetes/typed/extensions/v1beta1/fake","kubernetes/typed/networking/v1","kubernetes/typed/networking/v1/fake","kubernetes/typed/policy/v1beta1","kubernetes/typed/policy/v1beta1/fake","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1/fake","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1alpha1/fake","kubernetes/typed/rbac/v1beta1","kubernetes/typed/rbac/v1beta1/fake","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/scheduling/v1alpha1/fake","kubernetes/typed/settings/v1alpha1","kubernetes/typed/settings/v1alpha1/fake","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1/fake","kubernetes/typed/storage/v1beta1","kubernetes/typed/storage/v1beta1/fake","pkg/version","rest","rest/watch","testing","tools/auth","tools/cache","tools/cache/testing","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/leaderelection","tools/leaderelection/resourcelock","tools/metrics","tools/pager","tools/record","tools/reference","transport","util/cert","util/cert/triple","util/flowcontrol","util/homedir","util/integer","util/workqueue"]
|
||||
revision = "82aa063804cf055e16e8911250f888bc216e8b61"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/kube-openapi"
|
||||
packages = ["pkg/common"]
|
||||
revision = "abfc5fbe1cf87ee697db107fdfd24c32fe4397a8"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/kubernetes"
|
||||
packages = ["pkg/api","pkg/util/sysctl"]
|
||||
revision = "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4"
|
||||
version = "v1.8.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "e17f9cc4f1307ae1c3c913743c5561b3c1836d1a04d5c49c8b98727ef33b9646"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
102  Gopkg.toml  Normal file
@@ -0,0 +1,102 @@

# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#  name = "github.com/x/y"
#  version = "2.4.0"


[[constraint]]
  branch = "master"
  name = "github.com/armon/go-proxyproto"

[[constraint]]
  branch = "master"
  name = "github.com/golang/glog"

[[constraint]]
  name = "github.com/imdario/mergo"
  version = "0.2.2"

[[constraint]]
  branch = "master"
  name = "github.com/kylelemons/godebug"

[[constraint]]
  branch = "master"
  name = "github.com/mitchellh/go-ps"

[[constraint]]
  branch = "master"
  name = "github.com/mitchellh/mapstructure"

[[constraint]]
  name = "github.com/ncabatoff/process-exporter"
  version = "0.1.0"

[[constraint]]
  branch = "master"
  name = "github.com/paultag/sniff"

[[constraint]]
  name = "github.com/pborman/uuid"
  version = "1.1.0"

[[constraint]]
  name = "github.com/pkg/errors"
  version = "0.8.0"

[[constraint]]
  name = "github.com/prometheus/client_golang"
  version = "0.8.0"

[[constraint]]
  name = "github.com/spf13/pflag"
  version = "1.0.0"

[[constraint]]
  name = "github.com/zakjan/cert-chain-resolver"
  version = "1.0.1"

[[constraint]]
  name = "gopkg.in/fsnotify.v1"
  version = "1.4.2"

[[constraint]]
  name = "gopkg.in/go-playground/pool.v3"
  version = "3.1.1"

[[constraint]]
  branch = "master"
  name = "k8s.io/api"

[[constraint]]
  branch = "master"
  name = "k8s.io/apimachinery"

[[constraint]]
  branch = "master"
  name = "k8s.io/apiserver"

[[constraint]]
  branch = "master"
  name = "k8s.io/client-go"

[[constraint]]
  name = "k8s.io/kubernetes"
  version = "1.8.0"
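Gopkg.toml above only pins the project's direct constraints; `dep ensure` then walks the import statements in the tree, solves the transitive graph, and records the chosen revisions in Gopkg.lock. A hedged sketch of the kind of source file dep scans for that input set (the program is hypothetical; the two packages are taken from the constraints above):

```go
// Hypothetical program; dep derives its input set from imports like these and
// pins github.com/golang/glog and github.com/spf13/pflag per Gopkg.toml/Gopkg.lock.
package main

import (
	"github.com/golang/glog"
	"github.com/spf13/pflag"
)

func main() {
	verbose := pflag.Bool("verbose", false, "enable verbose logging")
	pflag.Parse()
	if *verbose {
		glog.Info("verbose logging enabled")
	}
	glog.Flush()
}
```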
@@ -1,121 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"log"
	"net/http"
	"os/exec"
	"strings"

	"github.com/spf13/pflag"

	api "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"

	nginxconfig "k8s.io/ingress-nginx/config"
	"k8s.io/ingress-nginx/ingress"
	"k8s.io/ingress-nginx/ingress/controller"
	"k8s.io/ingress-nginx/ingress/defaults"
)

func main() {
	dc := newDummyController()
	ic := controller.NewIngressController(dc)
	defer func() {
		log.Printf("Shutting down ingress controller...")
		ic.Stop()
	}()
	ic.Start()
}

func newDummyController() ingress.Controller {
	return &DummyController{}
}

type DummyController struct{}

func (dc DummyController) SetConfig(cfgMap *api.ConfigMap) {
	log.Printf("Config map %+v", cfgMap)
}

func (dc DummyController) Test(file string) *exec.Cmd {
	return exec.Command("echo", file)
}

func (dc DummyController) OnUpdate(updatePayload ingress.Configuration) error {
	log.Printf("Received OnUpdate notification")
	for _, b := range updatePayload.Backends {
		eps := []string{}
		for _, e := range b.Endpoints {
			eps = append(eps, e.Address)
		}
		log.Printf("%v: %v", b.Name, strings.Join(eps, ", "))
	}

	log.Printf("Reloaded new config")
	return nil
}

func (dc DummyController) BackendDefaults() defaults.Backend {
	// Just adopt nginx's default backend config
	return nginxconfig.NewDefault().Backend
}

func (n DummyController) Name() string {
	return "dummy Controller"
}

func (n DummyController) Check(_ *http.Request) error {
	return nil
}

func (dc DummyController) Info() *ingress.BackendInfo {
	return &ingress.BackendInfo{
		Name:       "dummy",
		Release:    "0.0.0",
		Build:      "git-00000000",
		Repository: "git://foo.bar.com",
	}
}

func (n DummyController) ConfigureFlags(*pflag.FlagSet) {
}

func (n DummyController) OverrideFlags(*pflag.FlagSet) {
}

func (n DummyController) SetListers(lister *ingress.StoreLister) {

}

func (n DummyController) DefaultIngressClass() string {
	return "dummy"
}

func (n DummyController) UpdateIngressStatus(*extensions.Ingress) []api.LoadBalancerIngress {
	return nil
}

// DefaultEndpoint returns the default endpoint to be use as default server that returns 404.
func (n DummyController) DefaultEndpoint() ingress.Endpoint {
	return ingress.Endpoint{
		Address: "127.0.0.1",
		Port:    "8181",
		Target:  &api.ObjectReference{},
	}
}
15  vendor/cloud.google.com/go/AUTHORS  generated  vendored
@@ -1,15 +0,0 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.

Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>
34  vendor/cloud.google.com/go/CONTRIBUTORS  generated  vendored
@@ -1,34 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>

# Keep the list alphabetically sorted.

Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>
202  vendor/cloud.google.com/go/LICENSE  generated  vendored
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2014 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
438  vendor/cloud.google.com/go/compute/metadata/metadata.go  generated  vendored
|
@ -1,438 +0,0 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package metadata provides access to Google Compute Engine (GCE)
|
||||
// metadata and API service accounts.
|
||||
//
|
||||
// This package is a wrapper around the GCE metadata service,
|
||||
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
|
||||
"cloud.google.com/go/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
// metadataIP is the documented metadata server IP address.
|
||||
metadataIP = "169.254.169.254"
|
||||
|
||||
// metadataHostEnv is the environment variable specifying the
|
||||
// GCE metadata hostname. If empty, the default value of
|
||||
// metadataIP ("169.254.169.254") is used instead.
|
||||
// This is variable name is not defined by any spec, as far as
|
||||
// I know; it was made up for the Go package.
|
||||
metadataHostEnv = "GCE_METADATA_HOST"
|
||||
)
|
||||
|
||||
type cachedValue struct {
|
||||
k string
|
||||
trim bool
|
||||
mu sync.Mutex
|
||||
v string
|
||||
}
|
||||
|
||||
var (
|
||||
projID = &cachedValue{k: "project/project-id", trim: true}
|
||||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
||||
instID = &cachedValue{k: "instance/id", trim: true}
|
||||
)
|
||||
|
||||
var (
|
||||
metaClient = &http.Client{
|
||||
Transport: &internal.Transport{
|
||||
Base: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
}
|
||||
subscribeClient = &http.Client{
|
||||
Transport: &internal.Transport{
|
||||
Base: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// NotDefinedError is returned when requested metadata is not defined.
|
||||
//
|
||||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
||||
//
|
||||
// This error is not returned if the value is defined to be the empty
|
||||
// string.
|
||||
type NotDefinedError string
|
||||
|
||||
func (suffix NotDefinedError) Error() string {
|
||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||
}
|
||||
|
||||
// Get returns a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
//
|
||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||
// 169.254.169.254 will be used instead.
|
||||
//
|
||||
// If the requested metadata is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
func Get(suffix string) (string, error) {
|
||||
val, _, err := getETag(metaClient, suffix)
|
||||
return val, err
|
||||
}
|
||||
|
||||
// getETag returns a value from the metadata service as well as the associated
|
||||
// ETag using the provided client. This func is otherwise equivalent to Get.
|
||||
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
|
||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||
// a container, which is an important use-case for local testing of cloud
|
||||
// deployments. To enable spoofing of the metadata service, the environment
|
||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||
// requests shall go.
|
||||
host := os.Getenv(metadataHostEnv)
|
||||
if host == "" {
|
||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||
// binaries built with the "netgo" tag and without cgo won't
|
||||
// know the search suffix for "metadata" is
|
||||
// ".google.internal", and this IP address is documented as
|
||||
// being stable anyway.
|
||||
host = metadataIP
|
||||
}
|
||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return "", "", NotDefinedError(suffix)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
|
||||
}
|
||||
all, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string(all), res.Header.Get("Etag"), nil
|
||||
}
|
||||
|
||||
func getTrimmed(suffix string) (s string, err error) {
|
||||
s, err = Get(suffix)
|
||||
s = strings.TrimSpace(s)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *cachedValue) get() (v string, err error) {
|
||||
defer c.mu.Unlock()
|
||||
c.mu.Lock()
|
||||
if c.v != "" {
|
||||
return c.v, nil
|
||||
}
|
||||
if c.trim {
|
||||
v, err = getTrimmed(c.k)
|
||||
} else {
|
||||
v, err = Get(c.k)
|
||||
}
|
||||
if err == nil {
|
||||
c.v = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
onGCEOnce sync.Once
|
||||
onGCE bool
|
||||
)
|
||||
|
||||
// OnGCE reports whether this process is running on Google Compute Engine.
|
||||
func OnGCE() bool {
|
||||
onGCEOnce.Do(initOnGCE)
|
||||
return onGCE
|
||||
}
|
||||
|
||||
func initOnGCE() {
|
||||
onGCE = testOnGCE()
|
||||
}
|
||||
|
||||
func testOnGCE() bool {
|
||||
// The user explicitly said they're on GCE, so trust them.
|
||||
if os.Getenv(metadataHostEnv) != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
resc := make(chan bool, 2)
|
||||
|
||||
// Try two strategies in parallel.
|
||||
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
|
||||
go func() {
|
||||
res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
|
||||
if err != nil {
|
||||
resc <- false
|
||||
return
|
||||
}
|
||||
defer res.Body.Close()
|
||||
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
||||
}()
|
||||
|
||||
go func() {
|
||||
addrs, err := net.LookupHost("metadata.google.internal")
|
||||
if err != nil || len(addrs) == 0 {
|
||||
resc <- false
|
||||
return
|
||||
}
|
||||
resc <- strsContains(addrs, metadataIP)
|
||||
}()
|
||||
|
||||
tryHarder := systemInfoSuggestsGCE()
|
||||
if tryHarder {
|
||||
res := <-resc
|
||||
if res {
|
||||
// The first strategy succeeded, so let's use it.
|
||||
return true
|
||||
}
|
||||
// Wait for either the DNS or metadata server probe to
|
||||
// contradict the other one and say we are running on
|
||||
// GCE. Give it a lot of time to do so, since the system
|
||||
// info already suggests we're running on a GCE BIOS.
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case res = <-resc:
|
||||
return res
|
||||
case <-timer.C:
|
||||
// Too slow. Who knows what this system is.
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// There's no hint from the system info that we're running on
|
||||
// GCE, so use the first probe's result as truth, whether it's
|
||||
// true or false. The goal here is to optimize for speed for
|
||||
// users who are NOT running on GCE. We can't assume that
|
||||
// either a DNS lookup or an HTTP request to a blackholed IP
|
||||
// address is fast. Worst case this should return when the
|
||||
// metaClient's Transport.ResponseHeaderTimeout or
|
||||
// Transport.Dial.Timeout fires (in two seconds).
|
||||
return <-resc
|
||||
}
|
||||
|
||||
// systemInfoSuggestsGCE reports whether the local system (without
|
||||
// doing network requests) suggests that we're running on GCE. If this
|
||||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
||||
// server.
|
||||
func systemInfoSuggestsGCE() bool {
|
||||
if runtime.GOOS != "linux" {
|
||||
// We don't have any non-Linux clues available, at least yet.
|
||||
return false
|
||||
}
|
||||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
||||
name := strings.TrimSpace(string(slurp))
|
||||
return name == "Google" || name == "Google Compute Engine"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
// The suffix may contain query parameters.
|
||||
//
|
||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
||||
// is deleted. Subscribe returns the error value returned from the last call to
|
||||
// fn, which may be nil when ok == false.
|
||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
const failedSubscribeSleep = time.Second * 5
|
||||
|
||||
// First check to see if the metadata value exists at all.
|
||||
val, lastETag, err := getETag(subscribeClient, suffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fn(val, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ok := true
|
||||
if strings.ContainsRune(suffix, '?') {
|
||||
suffix += "&wait_for_change=true&last_etag="
|
||||
} else {
|
||||
suffix += "?wait_for_change=true&last_etag="
|
||||
}
|
||||
for {
|
||||
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
|
||||
if err != nil {
|
||||
if _, deleted := err.(NotDefinedError); !deleted {
|
||||
time.Sleep(failedSubscribeSleep)
|
||||
continue // Retry on other errors.
|
||||
}
|
||||
ok = false
|
||||
}
|
||||
lastETag = etag
|
||||
|
||||
if err := fn(val, ok); err != nil || !ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func ProjectID() (string, error) { return projID.get() }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func NumericProjectID() (string, error) { return projNum.get() }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func InternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/ip")
|
||||
}
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func ExternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||
}
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func Hostname() (string, error) {
|
||||
return getTrimmed("instance/hostname")
|
||||
}
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func InstanceTags() ([]string, error) {
|
||||
var s []string
|
||||
j, err := Get("instance/tags")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func InstanceID() (string, error) {
|
||||
return instID.get()
|
||||
}
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func InstanceName() (string, error) {
|
||||
host, err := Hostname()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Split(host, ".")[0], nil
|
||||
}
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func Zone() (string, error) {
|
||||
zone, err := getTrimmed("instance/zone")
|
||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||
}
|
||||
|
||||
// InstanceAttributes returns the list of user-defined attributes,
|
||||
// assigned when initially creating a GCE VM instance. The value of an
|
||||
// attribute can be obtained with InstanceAttributeValue.
|
||||
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
|
||||
|
||||
// ProjectAttributes returns the list of user-defined attributes
|
||||
// applying to the project as a whole, not just this VM. The value of
|
||||
// an attribute can be obtained with ProjectAttributeValue.
|
||||
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
|
||||
|
||||
func lines(suffix string) ([]string, error) {
|
||||
j, err := Get(suffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||
for i := range s {
|
||||
s[i] = strings.TrimSpace(s[i])
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceAttributeValue returns the value of the provided VM
|
||||
// instance attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func InstanceAttributeValue(attr string) (string, error) {
|
||||
return Get("instance/attributes/" + attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue returns the value of the provided
|
||||
// project attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func ProjectAttributeValue(attr string) (string, error) {
|
||||
return Get("project/attributes/" + attr)
|
||||
}
|
||||
|
||||
// Scopes returns the service account scopes for the given account.
|
||||
// The account may be empty or the string "default" to use the instance's
|
||||
// main account.
|
||||
func Scopes(serviceAccount string) ([]string, error) {
|
||||
if serviceAccount == "" {
|
||||
serviceAccount = "default"
|
||||
}
|
||||
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||
}
|
||||
|
||||
func strsContains(ss []string, s string) bool {
|
||||
for _, v := range ss {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
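The deleted vendored file above implements a small client for the GCE metadata service and exports helpers such as `OnGCE`, `ProjectID` and `Zone`. A minimal sketch of a caller, assuming the `cloud.google.com/go/compute/metadata` package is still available to the build (this commit drops it from `vendor/`):

```go
// Sketch only; it assumes cloud.google.com/go/compute/metadata is available,
// which this commit no longer guarantees since the package is removed from vendor/.
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		fmt.Println("not running on GCE; metadata service unavailable")
		return
	}
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("reading project ID: %v", err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatalf("reading zone: %v", err)
	}
	fmt.Printf("project=%s zone=%s\n", project, zone)
}
```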
64  vendor/cloud.google.com/go/internal/cloud.go  generated  vendored
|
@ -1,64 +0,0 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package internal provides support for the cloud packages.
|
||||
//
|
||||
// Users should not import this package directly.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const userAgent = "gcloud-golang/0.1"
|
||||
|
||||
// Transport is an http.RoundTripper that appends Google Cloud client's
|
||||
// user-agent to the original request's user-agent header.
|
||||
type Transport struct {
|
||||
// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
|
||||
// Do User-Agent some other way.
|
||||
|
||||
// Base is the actual http.RoundTripper
|
||||
// requests will use. It must not be nil.
|
||||
Base http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip appends a user-agent to the existing user-agent
|
||||
// header and delegates the request to the base http.RoundTripper.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req = cloneRequest(req)
|
||||
ua := req.Header.Get("User-Agent")
|
||||
if ua == "" {
|
||||
ua = userAgent
|
||||
} else {
|
||||
ua = fmt.Sprintf("%s %s", ua, userAgent)
|
||||
}
|
||||
req.Header.Set("User-Agent", ua)
|
||||
return t.Base.RoundTrip(req)
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
return r2
|
||||
}
|
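internal/cloud.go above is an `http.RoundTripper` that appends a User-Agent before delegating to a base transport. Because that package is internal to `cloud.google.com/go`, the stand-alone sketch below re-implements the same pattern with made-up names rather than importing it:

```go
// Stand-alone re-implementation of the user-agent-appending RoundTripper shown
// above; uaTransport and its fields are illustrative names, not the library's API.
package main

import (
	"fmt"
	"net/http"
)

type uaTransport struct {
	base http.RoundTripper
	ua   string
}

func (t *uaTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone the request shallowly and copy its header map, mirroring cloneRequest above.
	r2 := new(http.Request)
	*r2 = *req
	r2.Header = make(http.Header, len(req.Header))
	for k, v := range req.Header {
		r2.Header[k] = v
	}
	if existing := r2.Header.Get("User-Agent"); existing != "" {
		r2.Header.Set("User-Agent", existing+" "+t.ua)
	} else {
		r2.Header.Set("User-Agent", t.ua)
	}
	return t.base.RoundTrip(r2)
}

func main() {
	client := &http.Client{Transport: &uaTransport{base: http.DefaultTransport, ua: "gcloud-golang/0.1"}}
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```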
2  vendor/github.com/PuerkitoBio/purell/README.md  generated  vendored
@@ -12,6 +12,7 @@ Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
 
 ## Changelog
 
+* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
 * **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
 * **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
 * **v0.2.0** : Add benchmarks, Attempt IDN support.
@@ -172,6 +173,7 @@ And with `FlagsUnsafeGreedy`:
 @opennota
 @pchristopher1275
 @zenovich
+@beeker1121
 
 ## License
 
57  vendor/github.com/PuerkitoBio/purell/bench_test.go  generated  vendored  Normal file
|
@ -0,0 +1,57 @@
|
|||
package purell
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
safeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/..//?"
|
||||
usuallySafeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/"
|
||||
unsafeUrl = "HttPS://..www.iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
|
||||
allDWORDUrl = "HttPS://1113982867:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
|
||||
allOctalUrl = "HttPS://0102.0146.07.0223:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
|
||||
allHexUrl = "HttPS://0x42660793:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
|
||||
allCombinedUrl = "HttPS://..0x42660793.:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
|
||||
)
|
||||
|
||||
func BenchmarkSafe(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NormalizeURLString(safeUrl, FlagsSafe)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUsuallySafe(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NormalizeURLString(usuallySafeUrl, FlagsUsuallySafeGreedy)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnsafe(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NormalizeURLString(unsafeUrl, FlagsUnsafeGreedy)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAllDWORD(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NormalizeURLString(allDWORDUrl, FlagsAllGreedy)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAllOctal(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NormalizeURLString(allOctalUrl, FlagsAllGreedy)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAllHex(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NormalizeURLString(allHexUrl, FlagsAllGreedy)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAllCombined(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
NormalizeURLString(allCombinedUrl, FlagsAllGreedy)
|
||||
}
|
||||
}
|
9 vendor/github.com/PuerkitoBio/purell/benchmarks/v0.1.0 generated vendored Normal file
|
@ -0,0 +1,9 @@
|
|||
PASS
|
||||
BenchmarkSafe 500000 6131 ns/op
|
||||
BenchmarkUsuallySafe 200000 7864 ns/op
|
||||
BenchmarkUnsafe 100000 28560 ns/op
|
||||
BenchmarkAllDWORD 50000 38722 ns/op
|
||||
BenchmarkAllOctal 50000 40941 ns/op
|
||||
BenchmarkAllHex 50000 44063 ns/op
|
||||
BenchmarkAllCombined 50000 33613 ns/op
|
||||
ok github.com/PuerkitoBio/purell 17.404s
|
35 vendor/github.com/PuerkitoBio/purell/example_test.go generated vendored Normal file
|
@ -0,0 +1,35 @@
|
|||
package purell
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func ExampleNormalizeURLString() {
|
||||
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
|
||||
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
// Output: http://somewebsite.com:80/Amazing%3F/url/
|
||||
}
|
||||
|
||||
func ExampleMustNormalizeURLString() {
|
||||
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
|
||||
FlagsUnsafeGreedy)
|
||||
fmt.Print(normalized)
|
||||
|
||||
// Output: http://somewebsite.com/Amazing%FA/url
|
||||
}
|
||||
|
||||
func ExampleNormalizeURL() {
|
||||
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
|
||||
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
|
||||
}
|
36 vendor/github.com/PuerkitoBio/purell/purell.go generated vendored
|
@ -15,8 +15,8 @@ import (
|
|||
|
||||
"github.com/PuerkitoBio/urlesc"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/text/secure/precis"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
"golang.org/x/text/width"
|
||||
)
|
||||
|
||||
// A set of normalization flags determines how a URL will
|
||||
|
@ -150,22 +150,26 @@ func MustNormalizeURLString(u string, f NormalizationFlags) string {
|
|||
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
|
||||
if parsed, e := url.Parse(u); e != nil {
|
||||
return "", e
|
||||
} else {
|
||||
options := make([]precis.Option, 1, 3)
|
||||
options[0] = precis.IgnoreCase
|
||||
if f&FlagLowercaseHost == FlagLowercaseHost {
|
||||
options = append(options, precis.FoldCase())
|
||||
}
|
||||
options = append(options, precis.Norm(norm.NFC))
|
||||
profile := precis.NewFreeform(options...)
|
||||
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
|
||||
return "", e
|
||||
}
|
||||
return NormalizeURL(parsed, f), nil
|
||||
parsed, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
panic("Unreachable code.")
|
||||
|
||||
if f&FlagLowercaseHost == FlagLowercaseHost {
|
||||
parsed.Host = strings.ToLower(parsed.Host)
|
||||
}
|
||||
|
||||
// The idna package doesn't fully conform to RFC 5895
|
||||
// (https://tools.ietf.org/html/rfc5895), so we do it here.
|
||||
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
|
||||
// TODO: Remove when (if?) idna package conforms to RFC 5895.
|
||||
parsed.Host = width.Fold.String(parsed.Host)
|
||||
parsed.Host = norm.NFC.String(parsed.Host)
|
||||
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return NormalizeURL(parsed, f), nil
|
||||
}
|
||||
|
||||
// NormalizeURL returns the normalized string.
|
||||
|
|
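The rewritten NormalizeURLString above lowercases the host when FlagLowercaseHost is set and then, as an RFC 5895 workaround, folds character width, applies NFC normalization, and finally converts the host to ASCII with the idna package. The standalone sketch below is not part of purell; it runs just that host-preparation pipeline, and the full-width hostname is an illustrative input.

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/idna"
	"golang.org/x/text/unicode/norm"
	"golang.org/x/text/width"
)

// Host-preparation pipeline as used in the new NormalizeURLString:
// lowercase, fold character width, normalize to NFC, then convert to ASCII.
func main() {
	host := "ＥＸＡＭＰＬＥ.com" // full-width letters, example input

	host = strings.ToLower(host)   // FlagLowercaseHost behaviour
	host = width.Fold.String(host) // RFC 5895: fold character width
	host = norm.NFC.String(host)   // RFC 5895: NFC normalization

	ascii, err := idna.ToASCII(host)
	if err != nil {
		fmt.Println("idna conversion failed:", err)
		return
	}
	fmt.Println(ascii) // plain "example.com" after folding
}
```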
768 vendor/github.com/PuerkitoBio/purell/purell_test.go generated vendored Normal file
|
@ -0,0 +1,768 @@
|
|||
package purell
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
nm string
|
||||
src string
|
||||
flgs NormalizationFlags
|
||||
res string
|
||||
parsed bool
|
||||
}
|
||||
|
||||
var (
|
||||
cases = [...]*testCase{
|
||||
&testCase{
|
||||
"LowerScheme",
|
||||
"HTTP://www.SRC.ca",
|
||||
FlagLowercaseScheme,
|
||||
"http://www.SRC.ca",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"LowerScheme2",
|
||||
"http://www.SRC.ca",
|
||||
FlagLowercaseScheme,
|
||||
"http://www.SRC.ca",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"LowerHost",
|
||||
"HTTP://www.SRC.ca/",
|
||||
FlagLowercaseHost,
|
||||
"http://www.src.ca/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UpperEscapes",
|
||||
`http://www.whatever.com/Some%aa%20Special%8Ecases/`,
|
||||
FlagUppercaseEscapes,
|
||||
"http://www.whatever.com/Some%AA%20Special%8Ecases/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UnnecessaryEscapes",
|
||||
`http://www.toto.com/%41%42%2E%44/%32%33%52%2D/%5f%7E`,
|
||||
FlagDecodeUnnecessaryEscapes,
|
||||
"http://www.toto.com/AB.D/23R-/_~",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDefaultPort",
|
||||
"HTTP://www.SRC.ca:80/",
|
||||
FlagRemoveDefaultPort,
|
||||
"http://www.SRC.ca/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDefaultPort2",
|
||||
"HTTP://www.SRC.ca:80",
|
||||
FlagRemoveDefaultPort,
|
||||
"http://www.SRC.ca", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDefaultPort3",
|
||||
"HTTP://www.SRC.ca:8080",
|
||||
FlagRemoveDefaultPort,
|
||||
"http://www.SRC.ca:8080", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Safe",
|
||||
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
|
||||
FlagsSafe,
|
||||
"http://www.src.ca/to%1Ato%8B%EE/OKnowABC~",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"BothLower",
|
||||
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
|
||||
FlagLowercaseHost | FlagLowercaseScheme,
|
||||
"http://www.src.ca:80/to%1Ato%8B%EE/OKnowABC~",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveTrailingSlash",
|
||||
"HTTP://www.SRC.ca:80/",
|
||||
FlagRemoveTrailingSlash,
|
||||
"http://www.SRC.ca:80", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveTrailingSlash2",
|
||||
"HTTP://www.SRC.ca:80/toto/titi/",
|
||||
FlagRemoveTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveTrailingSlash3",
|
||||
"HTTP://www.SRC.ca:80/toto/titi/fin/?a=1",
|
||||
FlagRemoveTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi/fin?a=1", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"AddTrailingSlash",
|
||||
"HTTP://www.SRC.ca:80",
|
||||
FlagAddTrailingSlash,
|
||||
"http://www.SRC.ca:80/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"AddTrailingSlash2",
|
||||
"HTTP://www.SRC.ca:80/toto/titi.html",
|
||||
FlagAddTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi.html/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"AddTrailingSlash3",
|
||||
"HTTP://www.SRC.ca:80/toto/titi/fin?a=1",
|
||||
FlagAddTrailingSlash,
|
||||
"http://www.SRC.ca:80/toto/titi/fin/?a=1", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDotSegments",
|
||||
"HTTP://root/a/b/./../../c/",
|
||||
FlagRemoveDotSegments,
|
||||
"http://root/c/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDotSegments2",
|
||||
"HTTP://root/../a/b/./../c/../d",
|
||||
FlagRemoveDotSegments,
|
||||
"http://root/a/d", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UsuallySafe",
|
||||
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/./c/d/../OKnow%41%42%43%7e/?a=b#test",
|
||||
FlagsUsuallySafeGreedy,
|
||||
"http://www.src.ca/to%1Ato%8B%EE/c/OKnowABC~?a=b#test",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDirectoryIndex",
|
||||
"HTTP://root/a/b/c/default.aspx",
|
||||
FlagRemoveDirectoryIndex,
|
||||
"http://root/a/b/c/", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDirectoryIndex2",
|
||||
"HTTP://root/a/b/c/default#a=b",
|
||||
FlagRemoveDirectoryIndex,
|
||||
"http://root/a/b/c/default#a=b", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveFragment",
|
||||
"HTTP://root/a/b/c/default#toto=tata",
|
||||
FlagRemoveFragment,
|
||||
"http://root/a/b/c/default", // Since Go1.1, scheme is automatically lowercased
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"ForceHTTP",
|
||||
"https://root/a/b/c/default#toto=tata",
|
||||
FlagForceHTTP,
|
||||
"http://root/a/b/c/default#toto=tata",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDuplicateSlashes",
|
||||
"https://root/a//b///c////default#toto=tata",
|
||||
FlagRemoveDuplicateSlashes,
|
||||
"https://root/a/b/c/default#toto=tata",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveDuplicateSlashes2",
|
||||
"https://root//a//b///c////default#toto=tata",
|
||||
FlagRemoveDuplicateSlashes,
|
||||
"https://root/a/b/c/default#toto=tata",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveWWW",
|
||||
"https://www.root/a/b/c/",
|
||||
FlagRemoveWWW,
|
||||
"https://root/a/b/c/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveWWW2",
|
||||
"https://WwW.Root/a/b/c/",
|
||||
FlagRemoveWWW,
|
||||
"https://Root/a/b/c/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"AddWWW",
|
||||
"https://Root/a/b/c/",
|
||||
FlagAddWWW,
|
||||
"https://www.Root/a/b/c/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"SortQuery",
|
||||
"http://root/toto/?b=4&a=1&c=3&b=2&a=5",
|
||||
FlagSortQuery,
|
||||
"http://root/toto/?a=1&a=5&b=2&b=4&c=3",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"RemoveEmptyQuerySeparator",
|
||||
"http://root/toto/?",
|
||||
FlagRemoveEmptyQuerySeparator,
|
||||
"http://root/toto/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Unsafe",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsUnsafeGreedy,
|
||||
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Safe2",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsSafe,
|
||||
"https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UsuallySafe2",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsUsuallySafeGreedy,
|
||||
"https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"AddTrailingSlashBug",
|
||||
"http://src.ca/",
|
||||
FlagsAllNonGreedy,
|
||||
"http://www.src.ca/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"SourceModified",
|
||||
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
|
||||
FlagsUnsafeGreedy,
|
||||
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
|
||||
true,
|
||||
},
|
||||
&testCase{
|
||||
"IPv6-1",
|
||||
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"IPv6-2",
|
||||
"http://[::ffff:192.168.1.1]/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://[::ffff:192.168.1.1]/test",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"IPv6-3",
|
||||
"http://[::ffff:192.168.1.1]:80/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://[::ffff:192.168.1.1]/test",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"IPv6-4",
|
||||
"htTps://[::fFff:192.168.1.1]:443/test",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"https://[::ffff:192.168.1.1]/test",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"FTP",
|
||||
"ftp://user:pass@ftp.foo.net/foo/bar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"ftp://user:pass@ftp.foo.net/foo/bar",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-1",
|
||||
"http://www.foo.com:80/foo",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com/foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-2",
|
||||
"http://www.foo.com:8000/foo",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com:8000/foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-3",
|
||||
"http://www.foo.com/%7ebar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com/~bar",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-4",
|
||||
"http://www.foo.com/%7Ebar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://www.foo.com/~bar",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-5",
|
||||
"http://USER:pass@www.Example.COM/foo/bar",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://USER:pass@www.example.com/foo/bar",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-6",
|
||||
"http://test.example/?a=%26&b=1",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/?a=%26&b=1",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-7",
|
||||
"http://test.example/%25/?p=%20val%20%25",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/%25/?p=%20val%20%25",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-8",
|
||||
"http://test.example/path/with a%20space+/",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/path/with%20a%20space+/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-9",
|
||||
"http://test.example/?",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Standard-10",
|
||||
"http://a.COM/path/?b&a",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://a.com/path/?b&a",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"StandardCasesAddTrailingSlash",
|
||||
"http://test.example?",
|
||||
FlagsSafe | FlagAddTrailingSlash,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"OctalIP-1",
|
||||
"http://0123.011.0.4/",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://0123.011.0.4/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"OctalIP-2",
|
||||
"http://0102.0146.07.0223/",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://66.102.7.147/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"OctalIP-3",
|
||||
"http://0102.0146.07.0223.:23/",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://66.102.7.147.:23/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"OctalIP-4",
|
||||
"http://USER:pass@0102.0146.07.0223../",
|
||||
FlagsSafe | FlagDecodeOctalHost,
|
||||
"http://USER:pass@66.102.7.147../",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"DWORDIP-1",
|
||||
"http://123.1113982867/",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://123.1113982867/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"DWORDIP-2",
|
||||
"http://1113982867/",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://66.102.7.147/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"DWORDIP-3",
|
||||
"http://1113982867.:23/",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://66.102.7.147.:23/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"DWORDIP-4",
|
||||
"http://USER:pass@1113982867../",
|
||||
FlagsSafe | FlagDecodeDWORDHost,
|
||||
"http://USER:pass@66.102.7.147../",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"HexIP-1",
|
||||
"http://0x123.1113982867/",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://0x123.1113982867/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"HexIP-2",
|
||||
"http://0x42660793/",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://66.102.7.147/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"HexIP-3",
|
||||
"http://0x42660793.:23/",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://66.102.7.147.:23/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"HexIP-4",
|
||||
"http://USER:pass@0x42660793../",
|
||||
FlagsSafe | FlagDecodeHexHost,
|
||||
"http://USER:pass@66.102.7.147../",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-1",
|
||||
"http://.www.foo.com../foo/bar.html",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.foo.com/foo/bar.html",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-2",
|
||||
"http://www.foo.com./foo/bar.html",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.foo.com/foo/bar.html",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-3",
|
||||
"http://www.foo.com.:81/foo",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.foo.com:81/foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UnnecessaryHostDots-4",
|
||||
"http://www.example.com./",
|
||||
FlagsSafe | FlagRemoveUnnecessaryHostDots,
|
||||
"http://www.example.com/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"EmptyPort-1",
|
||||
"http://www.thedraymin.co.uk:/main/?p=308",
|
||||
FlagsSafe | FlagRemoveEmptyPortSeparator,
|
||||
"http://www.thedraymin.co.uk/main/?p=308",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"EmptyPort-2",
|
||||
"http://www.src.ca:",
|
||||
FlagsSafe | FlagRemoveEmptyPortSeparator,
|
||||
"http://www.src.ca",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-1",
|
||||
"http://test.example/foo/bar/.",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-2",
|
||||
"http://test.example/foo/bar/./",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-3",
|
||||
"http://test.example/foo/bar/..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-4",
|
||||
"http://test.example/foo/bar/../",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-5",
|
||||
"http://test.example/foo/bar/../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/baz",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-6",
|
||||
"http://test.example/foo/bar/../..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-7",
|
||||
"http://test.example/foo/bar/../../",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-8",
|
||||
"http://test.example/foo/bar/../../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/baz",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-9",
|
||||
"http://test.example/foo/bar/../../../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/baz",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-10",
|
||||
"http://test.example/foo/bar/../../../../baz",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/baz",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-11",
|
||||
"http://test.example/./foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-12",
|
||||
"http://test.example/../foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-13",
|
||||
"http://test.example/foo.",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo.",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-14",
|
||||
"http://test.example/.foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/.foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-15",
|
||||
"http://test.example/foo..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo..",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-16",
|
||||
"http://test.example/..foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/..foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-17",
|
||||
"http://test.example/./../foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-18",
|
||||
"http://test.example/./foo/.",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-19",
|
||||
"http://test.example/foo/./bar",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-20",
|
||||
"http://test.example/foo/../bar",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/bar",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-21",
|
||||
"http://test.example/foo//",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Slashes-22",
|
||||
"http://test.example/foo///bar//",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"http://test.example/foo/bar/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Relative",
|
||||
"foo/bar",
|
||||
FlagsAllGreedy,
|
||||
"foo/bar",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Relative-1",
|
||||
"./../foo",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"foo",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Relative-2",
|
||||
"./foo/bar/../baz/../bang/..",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"foo/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Relative-3",
|
||||
"foo///bar//",
|
||||
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
|
||||
"foo/bar/",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"Relative-4",
|
||||
"www.youtube.com",
|
||||
FlagsUsuallySafeGreedy,
|
||||
"www.youtube.com",
|
||||
false,
|
||||
},
|
||||
/*&testCase{
|
||||
"UrlNorm-5",
|
||||
"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3",
|
||||
FlagsSafe | FlagRemoveDotSegments,
|
||||
"http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3",
|
||||
false,
|
||||
},
|
||||
&testCase{
|
||||
"UrlNorm-1",
|
||||
"http://test.example/?a=%e3%82%82%26",
|
||||
FlagsAllGreedy,
|
||||
"http://test.example/?a=\xe3\x82\x82%26",
|
||||
false,
|
||||
},*/
|
||||
}
|
||||
)
|
||||
|
||||
func TestRunner(t *testing.T) {
|
||||
for _, tc := range cases {
|
||||
runCase(tc, t)
|
||||
}
|
||||
}
|
||||
|
||||
func runCase(tc *testCase, t *testing.T) {
|
||||
t.Logf("running %s...", tc.nm)
|
||||
if tc.parsed {
|
||||
u, e := url.Parse(tc.src)
|
||||
if e != nil {
|
||||
t.Errorf("%s - FAIL : %s", tc.nm, e)
|
||||
return
|
||||
} else {
|
||||
NormalizeURL(u, tc.flgs)
|
||||
if s := u.String(); s != tc.res {
|
||||
t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if s, e := NormalizeURLString(tc.src, tc.flgs); e != nil {
|
||||
t.Errorf("%s - FAIL : %s", tc.nm, e)
|
||||
} else if s != tc.res {
|
||||
t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeUnnecessaryEscapesAll(t *testing.T) {
|
||||
var url = "http://host/"
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
url += fmt.Sprintf("%%%02x", i)
|
||||
}
|
||||
if s, e := NormalizeURLString(url, FlagDecodeUnnecessaryEscapes); e != nil {
|
||||
t.Fatalf("Got error %s", e.Error())
|
||||
} else {
|
||||
const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22%23$%25&'()*+,-./0123456789:;%3C=%3E%3F@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
|
||||
if s != want {
|
||||
t.Errorf("DecodeUnnecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeNecessaryEscapesAll(t *testing.T) {
|
||||
var url = "http://host/"
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
if i != 0x25 {
|
||||
url += string(i)
|
||||
}
|
||||
}
|
||||
if s, e := NormalizeURLString(url, FlagEncodeNecessaryEscapes); e != nil {
|
||||
t.Fatalf("Got error %s", e.Error())
|
||||
} else {
|
||||
const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22#$&'()*+,-./0123456789:;%3C=%3E?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%C2%A0%C2%A1%C2%A2%C2%A3%C2%A4%C2%A5%C2%A6%C2%A7%C2%A8%C2%A9%C2%AA%C2%AB%C2%AC%C2%AD%C2%AE%C2%AF%C2%B0%C2%B1%C2%B2%C2%B3%C2%B4%C2%B5%C2%B6%C2%B7%C2%B8%C2%B9%C2%BA%C2%BB%C2%BC%C2%BD%C2%BE%C2%BF%C3%80%C3%81%C3%82%C3%83%C3%84%C3%85%C3%86%C3%87%C3%88%C3%89%C3%8A%C3%8B%C3%8C%C3%8D%C3%8E%C3%8F%C3%90%C3%91%C3%92%C3%93%C3%94%C3%95%C3%96%C3%97%C3%98%C3%99%C3%9A%C3%9B%C3%9C%C3%9D%C3%9E%C3%9F%C3%A0%C3%A1%C3%A2%C3%A3%C3%A4%C3%A5%C3%A6%C3%A7%C3%A8%C3%A9%C3%AA%C3%AB%C3%AC%C3%AD%C3%AE%C3%AF%C3%B0%C3%B1%C3%B2%C3%B3%C3%B4%C3%B5%C3%B6%C3%B7%C3%B8%C3%B9%C3%BA%C3%BB%C3%BC%C3%BD%C3%BE%C3%BF"
|
||||
if s != want {
|
||||
t.Errorf("EncodeNecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s)
|
||||
}
|
||||
}
|
||||
}
|
53 vendor/github.com/PuerkitoBio/purell/urlnorm_test.go generated vendored Normal file
|
@ -0,0 +1,53 @@
|
|||
package purell
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test cases merged from PR #1
|
||||
// Originally from https://github.com/jehiah/urlnorm/blob/master/test_urlnorm.py
|
||||
|
||||
func assertMap(t *testing.T, cases map[string]string, f NormalizationFlags) {
|
||||
for bad, good := range cases {
|
||||
s, e := NormalizeURLString(bad, f)
|
||||
if e != nil {
|
||||
t.Errorf("%s normalizing %v to %v", e.Error(), bad, good)
|
||||
} else {
|
||||
if s != good {
|
||||
t.Errorf("source: %v expected: %v got: %v", bad, good, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This tests normalization to a unicode representation
|
||||
// percent escapes for unreserved values are unescaped to their unicode value
|
||||
// tests normalization to idna domains
|
||||
// test ip word handling, ipv6 address handling, and trailing domain periods
|
||||
// in general, this matches Google Chrome's unescaping for things in the address bar.
|
||||
// spaces are converted to '+' (perhaps controversial)
|
||||
// http://code.google.com/p/google-url/ probably is another good reference for this approach
|
||||
func TestUrlnorm(t *testing.T) {
|
||||
testcases := map[string]string{
|
||||
"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=%e3%82%82%26",
|
||||
//"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=\xe3\x82\x82%26", //should return a unicode character
|
||||
"http://s.xn--q-bga.DE/": "http://s.xn--q-bga.de/", //should be in idna format
|
||||
"http://XBLA\u306eXbox.com": "http://xn--xblaxbox-jf4g.com", //test utf8 and unicode
|
||||
"http://президент.рф": "http://xn--d1abbgf6aiiy.xn--p1ai",
|
||||
"http://ПРЕЗИДЕНТ.РФ": "http://xn--d1abbgf6aiiy.xn--p1ai",
|
||||
"http://ab¥ヲ₩○.com": "http://xn--ab-ida8983azmfnvs.com", //test width folding
|
||||
"http://\u00e9.com": "http://xn--9ca.com",
|
||||
"http://e\u0301.com": "http://xn--9ca.com",
|
||||
"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3",
|
||||
//"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3",
|
||||
|
||||
"http://test.example/\xe3\x82\xad": "http://test.example/%E3%82%AD",
|
||||
//"http://test.example/\xe3\x82\xad": "http://test.example/\xe3\x82\xad",
|
||||
"http://test.example/?p=%23val#test-%23-val%25": "http://test.example/?p=%23val#test-%23-val%25", //check that %23 (#) is not escaped where it shouldn't be
|
||||
|
||||
"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n",
|
||||
//"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I\xc3\xb1t\xc3\xabrn\xc3\xa2ti\xc3\xb4n\xef\xbf\xbdliz\xc3\xa6ti\xc3\xb8n",
|
||||
}
|
||||
|
||||
assertMap(t, testcases, FlagsSafe|FlagRemoveDotSegments)
|
||||
}
|
6 vendor/github.com/PuerkitoBio/urlesc/.travis.yml generated vendored
|
@ -1,7 +1,11 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.4.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- tip
|
||||
|
||||
install:
|
||||
|
|
2 vendor/github.com/PuerkitoBio/urlesc/README.md generated vendored
|
@ -1,4 +1,4 @@
|
|||
urlesc [](https://travis-ci.org/PuerkitoBio/urlesc) [](http://godoc.org/github.com/PuerkitoBio/urlesc)
|
||||
urlesc [](https://travis-ci.org/PuerkitoBio/urlesc) [](http://godoc.org/github.com/PuerkitoBio/urlesc)
|
||||
======
|
||||
|
||||
Package urlesc implements query escaping as per RFC 3986.
|
||||
|
|
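For orientation before the test file below: urlesc exposes QueryEscape for escaping a single query component and Escape for re-serializing a parsed *url.URL according to RFC 3986. A minimal usage sketch, with inputs and expected outputs taken from the test tables in this diff:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// Query-component escaping (see escapeTests below: "10%" -> "10%25").
	fmt.Println(urlesc.QueryEscape("10%"))

	// Re-serialize a parsed URL (see urltests below: the escaped "%26" in
	// the path round-trips to a literal "&").
	u, err := url.Parse("http://www.google.com/file%20one%26two")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(urlesc.Escape(u)) // http://www.google.com/file%20one&two
}
```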
641 vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go generated vendored Normal file
|
@ -0,0 +1,641 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package urlesc
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type URLTest struct {
|
||||
in string
|
||||
out *url.URL
|
||||
roundtrip string // expected result of reserializing the URL; empty means same as "in".
|
||||
}
|
||||
|
||||
var urltests = []URLTest{
|
||||
// no path
|
||||
{
|
||||
"http://www.google.com",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// path
|
||||
{
|
||||
"http://www.google.com/",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// path with hex escaping
|
||||
{
|
||||
"http://www.google.com/file%20one%26two",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/file one&two",
|
||||
},
|
||||
"http://www.google.com/file%20one&two",
|
||||
},
|
||||
// user
|
||||
{
|
||||
"ftp://webmaster@www.google.com/",
|
||||
&url.URL{
|
||||
Scheme: "ftp",
|
||||
User: url.User("webmaster"),
|
||||
Host: "www.google.com",
|
||||
Path: "/",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// escape sequence in username
|
||||
{
|
||||
"ftp://john%20doe@www.google.com/",
|
||||
&url.URL{
|
||||
Scheme: "ftp",
|
||||
User: url.User("john doe"),
|
||||
Host: "www.google.com",
|
||||
Path: "/",
|
||||
},
|
||||
"ftp://john%20doe@www.google.com/",
|
||||
},
|
||||
// query
|
||||
{
|
||||
"http://www.google.com/?q=go+language",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/",
|
||||
RawQuery: "q=go+language",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// query with hex escaping: NOT parsed
|
||||
{
|
||||
"http://www.google.com/?q=go%20language",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/",
|
||||
RawQuery: "q=go%20language",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// %20 outside query
|
||||
{
|
||||
"http://www.google.com/a%20b?q=c+d",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/a b",
|
||||
RawQuery: "q=c+d",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// path without leading /, so no parsing
|
||||
{
|
||||
"http:www.google.com/?q=go+language",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Opaque: "www.google.com/",
|
||||
RawQuery: "q=go+language",
|
||||
},
|
||||
"http:www.google.com/?q=go+language",
|
||||
},
|
||||
// path without leading /, so no parsing
|
||||
{
|
||||
"http:%2f%2fwww.google.com/?q=go+language",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Opaque: "%2f%2fwww.google.com/",
|
||||
RawQuery: "q=go+language",
|
||||
},
|
||||
"http:%2f%2fwww.google.com/?q=go+language",
|
||||
},
|
||||
// non-authority with path
|
||||
{
|
||||
"mailto:/webmaster@golang.org",
|
||||
&url.URL{
|
||||
Scheme: "mailto",
|
||||
Path: "/webmaster@golang.org",
|
||||
},
|
||||
"mailto:///webmaster@golang.org", // unfortunate compromise
|
||||
},
|
||||
// non-authority
|
||||
{
|
||||
"mailto:webmaster@golang.org",
|
||||
&url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: "webmaster@golang.org",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// unescaped :// in query should not create a scheme
|
||||
{
|
||||
"/foo?query=http://bad",
|
||||
&url.URL{
|
||||
Path: "/foo",
|
||||
RawQuery: "query=http://bad",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// leading // without scheme should create an authority
|
||||
{
|
||||
"//foo",
|
||||
&url.URL{
|
||||
Host: "foo",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// leading // without scheme, with userinfo, path, and query
|
||||
{
|
||||
"//user@foo/path?a=b",
|
||||
&url.URL{
|
||||
User: url.User("user"),
|
||||
Host: "foo",
|
||||
Path: "/path",
|
||||
RawQuery: "a=b",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// Three leading slashes isn't an authority, but doesn't return an error.
|
||||
// (We can't return an error, as this code is also used via
|
||||
// ServeHTTP -> ReadRequest -> Parse, which is arguably a
|
||||
// different URL parsing context, but currently shares the
|
||||
// same codepath)
|
||||
{
|
||||
"///threeslashes",
|
||||
&url.URL{
|
||||
Path: "///threeslashes",
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
"http://user:password@google.com",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
User: url.UserPassword("user", "password"),
|
||||
Host: "google.com",
|
||||
},
|
||||
"http://user:password@google.com",
|
||||
},
|
||||
// unescaped @ in username should not confuse host
|
||||
{
|
||||
"http://j@ne:password@google.com",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
User: url.UserPassword("j@ne", "password"),
|
||||
Host: "google.com",
|
||||
},
|
||||
"http://j%40ne:password@google.com",
|
||||
},
|
||||
// unescaped @ in password should not confuse host
|
||||
{
|
||||
"http://jane:p@ssword@google.com",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
User: url.UserPassword("jane", "p@ssword"),
|
||||
Host: "google.com",
|
||||
},
|
||||
"http://jane:p%40ssword@google.com",
|
||||
},
|
||||
{
|
||||
"http://j@ne:password@google.com/p@th?q=@go",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
User: url.UserPassword("j@ne", "password"),
|
||||
Host: "google.com",
|
||||
Path: "/p@th",
|
||||
RawQuery: "q=@go",
|
||||
},
|
||||
"http://j%40ne:password@google.com/p@th?q=@go",
|
||||
},
|
||||
{
|
||||
"http://www.google.com/?q=go+language#foo",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/",
|
||||
RawQuery: "q=go+language",
|
||||
Fragment: "foo",
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
"http://www.google.com/?q=go+language#foo%26bar",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/",
|
||||
RawQuery: "q=go+language",
|
||||
Fragment: "foo&bar",
|
||||
},
|
||||
"http://www.google.com/?q=go+language#foo&bar",
|
||||
},
|
||||
{
|
||||
"file:///home/adg/rabbits",
|
||||
&url.URL{
|
||||
Scheme: "file",
|
||||
Host: "",
|
||||
Path: "/home/adg/rabbits",
|
||||
},
|
||||
"file:///home/adg/rabbits",
|
||||
},
|
||||
// "Windows" paths are no exception to the rule.
|
||||
// See golang.org/issue/6027, especially comment #9.
|
||||
{
|
||||
"file:///C:/FooBar/Baz.txt",
|
||||
&url.URL{
|
||||
Scheme: "file",
|
||||
Host: "",
|
||||
Path: "/C:/FooBar/Baz.txt",
|
||||
},
|
||||
"file:///C:/FooBar/Baz.txt",
|
||||
},
|
||||
// case-insensitive scheme
|
||||
{
|
||||
"MaIlTo:webmaster@golang.org",
|
||||
&url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: "webmaster@golang.org",
|
||||
},
|
||||
"mailto:webmaster@golang.org",
|
||||
},
|
||||
// Relative path
|
||||
{
|
||||
"a/b/c",
|
||||
&url.URL{
|
||||
Path: "a/b/c",
|
||||
},
|
||||
"a/b/c",
|
||||
},
|
||||
// escaped '?' in username and password
|
||||
{
|
||||
"http://%3Fam:pa%3Fsword@google.com",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
User: url.UserPassword("?am", "pa?sword"),
|
||||
Host: "google.com",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// escaped '?' and '#' in path
|
||||
{
|
||||
"http://example.com/%3F%23",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "?#",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// unescaped [ ] ! ' ( ) * in path
|
||||
{
|
||||
"http://example.com/[]!'()*",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "[]!'()*",
|
||||
},
|
||||
"http://example.com/[]!'()*",
|
||||
},
|
||||
// escaped : / ? # [ ] @ in username and password
|
||||
{
|
||||
"http://%3A%2F%3F:%23%5B%5D%40@example.com",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
User: url.UserPassword(":/?", "#[]@"),
|
||||
Host: "example.com",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// unescaped ! $ & ' ( ) * + , ; = in username and password
|
||||
{
|
||||
"http://!$&'():*+,;=@example.com",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
User: url.UserPassword("!$&'()", "*+,;="),
|
||||
Host: "example.com",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// unescaped = : / . ? = in query component
|
||||
{
|
||||
"http://example.com/?q=http://google.com/?q=",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/",
|
||||
RawQuery: "q=http://google.com/?q=",
|
||||
},
|
||||
"",
|
||||
},
|
||||
// unescaped : / ? [ ] @ ! $ & ' ( ) * + , ; = in fragment
|
||||
{
|
||||
"http://example.com/#:/?%23[]@!$&'()*+,;=",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "example.com",
|
||||
Path: "/",
|
||||
Fragment: ":/?#[]@!$&'()*+,;=",
|
||||
},
|
||||
"",
|
||||
},
|
||||
}
|
||||
|
||||
func DoTestString(t *testing.T, parse func(string) (*url.URL, error), name string, tests []URLTest) {
|
||||
for _, tt := range tests {
|
||||
u, err := parse(tt.in)
|
||||
if err != nil {
|
||||
t.Errorf("%s(%q) returned error %s", name, tt.in, err)
|
||||
continue
|
||||
}
|
||||
expected := tt.in
|
||||
if len(tt.roundtrip) > 0 {
|
||||
expected = tt.roundtrip
|
||||
}
|
||||
s := Escape(u)
|
||||
if s != expected {
|
||||
t.Errorf("Escape(%s(%q)) == %q (expected %q)", name, tt.in, s, expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestURLString(t *testing.T) {
|
||||
DoTestString(t, url.Parse, "Parse", urltests)
|
||||
|
||||
// no leading slash on path should prepend
|
||||
// slash on String() call
|
||||
noslash := URLTest{
|
||||
"http://www.google.com/search",
|
||||
&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "search",
|
||||
},
|
||||
"",
|
||||
}
|
||||
s := Escape(noslash.out)
|
||||
if s != noslash.in {
|
||||
t.Errorf("Expected %s; go %s", noslash.in, s)
|
||||
}
|
||||
}
|
||||
|
||||
type EscapeTest struct {
|
||||
in string
|
||||
out string
|
||||
err error
|
||||
}
|
||||
|
||||
var escapeTests = []EscapeTest{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"abc",
|
||||
"abc",
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"one two",
|
||||
"one+two",
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"10%",
|
||||
"10%25",
|
||||
nil,
|
||||
},
|
||||
{
|
||||
" ?&=#+%!<>#\"{}|\\^[]`☺\t:/@$'()*,;",
|
||||
"+?%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09%3A/%40%24%27%28%29%2A%2C%3B",
|
||||
nil,
|
||||
},
|
||||
}
|
||||
|
||||
func TestEscape(t *testing.T) {
|
||||
for _, tt := range escapeTests {
|
||||
actual := QueryEscape(tt.in)
|
||||
if tt.out != actual {
|
||||
t.Errorf("QueryEscape(%q) = %q, want %q", tt.in, actual, tt.out)
|
||||
}
|
||||
|
||||
// for bonus points, verify that escape:unescape is an identity.
|
||||
roundtrip, err := url.QueryUnescape(actual)
|
||||
if roundtrip != tt.in || err != nil {
|
||||
t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var resolveReferenceTests = []struct {
|
||||
base, rel, expected string
|
||||
}{
|
||||
// Absolute URL references
|
||||
{"http://foo.com?a=b", "https://bar.com/", "https://bar.com/"},
|
||||
{"http://foo.com/", "https://bar.com/?a=b", "https://bar.com/?a=b"},
|
||||
{"http://foo.com/bar", "mailto:foo@example.com", "mailto:foo@example.com"},
|
||||
|
||||
// Path-absolute references
|
||||
{"http://foo.com/bar", "/baz", "http://foo.com/baz"},
|
||||
{"http://foo.com/bar?a=b#f", "/baz", "http://foo.com/baz"},
|
||||
{"http://foo.com/bar?a=b", "/baz?c=d", "http://foo.com/baz?c=d"},
|
||||
|
||||
// Scheme-relative
|
||||
{"https://foo.com/bar?a=b", "//bar.com/quux", "https://bar.com/quux"},
|
||||
|
||||
// Path-relative references:
|
||||
|
||||
// ... current directory
|
||||
{"http://foo.com", ".", "http://foo.com/"},
|
||||
{"http://foo.com/bar", ".", "http://foo.com/"},
|
||||
{"http://foo.com/bar/", ".", "http://foo.com/bar/"},
|
||||
|
||||
// ... going down
|
||||
{"http://foo.com", "bar", "http://foo.com/bar"},
|
||||
{"http://foo.com/", "bar", "http://foo.com/bar"},
|
||||
{"http://foo.com/bar/baz", "quux", "http://foo.com/bar/quux"},
|
||||
|
||||
// ... going up
|
||||
{"http://foo.com/bar/baz", "../quux", "http://foo.com/quux"},
|
||||
{"http://foo.com/bar/baz", "../../../../../quux", "http://foo.com/quux"},
|
||||
{"http://foo.com/bar", "..", "http://foo.com/"},
|
||||
{"http://foo.com/bar/baz", "./..", "http://foo.com/"},
|
||||
// ".." in the middle (issue 3560)
|
||||
{"http://foo.com/bar/baz", "quux/dotdot/../tail", "http://foo.com/bar/quux/tail"},
|
||||
{"http://foo.com/bar/baz", "quux/./dotdot/../tail", "http://foo.com/bar/quux/tail"},
|
||||
{"http://foo.com/bar/baz", "quux/./dotdot/.././tail", "http://foo.com/bar/quux/tail"},
|
||||
{"http://foo.com/bar/baz", "quux/./dotdot/./../tail", "http://foo.com/bar/quux/tail"},
|
||||
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/././../../tail", "http://foo.com/bar/quux/tail"},
|
||||
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/./.././../tail", "http://foo.com/bar/quux/tail"},
|
||||
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/dotdot/./../../.././././tail", "http://foo.com/bar/quux/tail"},
|
||||
{"http://foo.com/bar/baz", "quux/./dotdot/../dotdot/../dot/./tail/..", "http://foo.com/bar/quux/dot/"},
|
||||
|
||||
// Remove any dot-segments prior to forming the target URI.
|
||||
// http://tools.ietf.org/html/rfc3986#section-5.2.4
|
||||
{"http://foo.com/dot/./dotdot/../foo/bar", "../baz", "http://foo.com/dot/baz"},
|
||||
|
||||
// Triple dot isn't special
|
||||
{"http://foo.com/bar", "...", "http://foo.com/..."},
|
||||
|
||||
// Fragment
|
||||
{"http://foo.com/bar", ".#frag", "http://foo.com/#frag"},
|
||||
|
||||
// RFC 3986: Normal Examples
|
||||
// http://tools.ietf.org/html/rfc3986#section-5.4.1
|
||||
{"http://a/b/c/d;p?q", "g:h", "g:h"},
|
||||
{"http://a/b/c/d;p?q", "g", "http://a/b/c/g"},
|
||||
{"http://a/b/c/d;p?q", "./g", "http://a/b/c/g"},
|
||||
{"http://a/b/c/d;p?q", "g/", "http://a/b/c/g/"},
|
||||
{"http://a/b/c/d;p?q", "/g", "http://a/g"},
|
||||
{"http://a/b/c/d;p?q", "//g", "http://g"},
|
||||
{"http://a/b/c/d;p?q", "?y", "http://a/b/c/d;p?y"},
|
||||
{"http://a/b/c/d;p?q", "g?y", "http://a/b/c/g?y"},
|
||||
{"http://a/b/c/d;p?q", "#s", "http://a/b/c/d;p?q#s"},
|
||||
{"http://a/b/c/d;p?q", "g#s", "http://a/b/c/g#s"},
|
||||
{"http://a/b/c/d;p?q", "g?y#s", "http://a/b/c/g?y#s"},
|
||||
{"http://a/b/c/d;p?q", ";x", "http://a/b/c/;x"},
|
||||
{"http://a/b/c/d;p?q", "g;x", "http://a/b/c/g;x"},
|
||||
{"http://a/b/c/d;p?q", "g;x?y#s", "http://a/b/c/g;x?y#s"},
|
||||
{"http://a/b/c/d;p?q", "", "http://a/b/c/d;p?q"},
|
||||
{"http://a/b/c/d;p?q", ".", "http://a/b/c/"},
|
||||
{"http://a/b/c/d;p?q", "./", "http://a/b/c/"},
|
||||
{"http://a/b/c/d;p?q", "..", "http://a/b/"},
|
||||
{"http://a/b/c/d;p?q", "../", "http://a/b/"},
|
||||
{"http://a/b/c/d;p?q", "../g", "http://a/b/g"},
|
||||
{"http://a/b/c/d;p?q", "../..", "http://a/"},
|
||||
{"http://a/b/c/d;p?q", "../../", "http://a/"},
|
||||
{"http://a/b/c/d;p?q", "../../g", "http://a/g"},
|
||||
|
||||
// RFC 3986: Abnormal Examples
|
||||
// http://tools.ietf.org/html/rfc3986#section-5.4.2
|
||||
{"http://a/b/c/d;p?q", "../../../g", "http://a/g"},
|
||||
{"http://a/b/c/d;p?q", "../../../../g", "http://a/g"},
|
||||
{"http://a/b/c/d;p?q", "/./g", "http://a/g"},
|
||||
{"http://a/b/c/d;p?q", "/../g", "http://a/g"},
|
||||
{"http://a/b/c/d;p?q", "g.", "http://a/b/c/g."},
|
||||
{"http://a/b/c/d;p?q", ".g", "http://a/b/c/.g"},
|
||||
{"http://a/b/c/d;p?q", "g..", "http://a/b/c/g.."},
|
||||
{"http://a/b/c/d;p?q", "..g", "http://a/b/c/..g"},
|
||||
{"http://a/b/c/d;p?q", "./../g", "http://a/b/g"},
|
||||
{"http://a/b/c/d;p?q", "./g/.", "http://a/b/c/g/"},
|
||||
{"http://a/b/c/d;p?q", "g/./h", "http://a/b/c/g/h"},
|
||||
{"http://a/b/c/d;p?q", "g/../h", "http://a/b/c/h"},
|
||||
{"http://a/b/c/d;p?q", "g;x=1/./y", "http://a/b/c/g;x=1/y"},
|
||||
{"http://a/b/c/d;p?q", "g;x=1/../y", "http://a/b/c/y"},
|
||||
{"http://a/b/c/d;p?q", "g?y/./x", "http://a/b/c/g?y/./x"},
|
||||
{"http://a/b/c/d;p?q", "g?y/../x", "http://a/b/c/g?y/../x"},
|
||||
{"http://a/b/c/d;p?q", "g#s/./x", "http://a/b/c/g#s/./x"},
|
||||
{"http://a/b/c/d;p?q", "g#s/../x", "http://a/b/c/g#s/../x"},
|
||||
|
||||
// Extras.
|
||||
{"https://a/b/c/d;p?q", "//g?q", "https://g?q"},
|
||||
{"https://a/b/c/d;p?q", "//g#s", "https://g#s"},
|
||||
{"https://a/b/c/d;p?q", "//g/d/e/f?y#s", "https://g/d/e/f?y#s"},
|
||||
{"https://a/b/c/d;p#s", "?y", "https://a/b/c/d;p?y"},
|
||||
{"https://a/b/c/d;p?q#s", "?y", "https://a/b/c/d;p?y"},
|
||||
}
|
||||
|
||||
func TestResolveReference(t *testing.T) {
|
||||
mustParse := func(url_ string) *url.URL {
|
||||
u, err := url.Parse(url_)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected URL to parse: %q, got error: %v", url_, err)
|
||||
}
|
||||
return u
|
||||
}
|
||||
opaque := &url.URL{Scheme: "scheme", Opaque: "opaque"}
|
||||
for _, test := range resolveReferenceTests {
|
||||
base := mustParse(test.base)
|
||||
rel := mustParse(test.rel)
|
||||
url := base.ResolveReference(rel)
|
||||
if Escape(url) != test.expected {
|
||||
t.Errorf("URL(%q).ResolveReference(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url))
|
||||
}
|
||||
// Ensure that new instances are returned.
|
||||
if base == url {
|
||||
t.Errorf("Expected URL.ResolveReference to return new URL instance.")
|
||||
}
|
||||
// Test the convenience wrapper too.
|
||||
url, err := base.Parse(test.rel)
|
||||
if err != nil {
|
||||
t.Errorf("URL(%q).Parse(%q) failed: %v", test.base, test.rel, err)
|
||||
} else if Escape(url) != test.expected {
|
||||
t.Errorf("URL(%q).Parse(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url))
|
||||
} else if base == url {
|
||||
// Ensure that new instances are returned for the wrapper too.
|
||||
t.Errorf("Expected URL.Parse to return new URL instance.")
|
||||
}
|
||||
// Ensure Opaque resets the URL.
|
||||
url = base.ResolveReference(opaque)
|
||||
if *url != *opaque {
|
||||
t.Errorf("ResolveReference failed to resolve opaque URL: want %#v, got %#v", url, opaque)
|
||||
}
|
||||
// Test the convenience wrapper with an opaque URL too.
|
||||
url, err = base.Parse("scheme:opaque")
|
||||
if err != nil {
|
||||
t.Errorf(`URL(%q).Parse("scheme:opaque") failed: %v`, test.base, err)
|
||||
} else if *url != *opaque {
|
||||
t.Errorf("Parse failed to resolve opaque URL: want %#v, got %#v", url, opaque)
|
||||
} else if base == url {
|
||||
// Ensure that new instances are returned, again.
|
||||
t.Errorf("Expected URL.Parse to return new URL instance.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type shouldEscapeTest struct {
|
||||
in byte
|
||||
mode encoding
|
||||
escape bool
|
||||
}
|
||||
|
||||
var shouldEscapeTests = []shouldEscapeTest{
|
||||
// Unreserved characters (§2.3)
|
||||
{'a', encodePath, false},
|
||||
{'a', encodeUserPassword, false},
|
||||
{'a', encodeQueryComponent, false},
|
||||
{'a', encodeFragment, false},
|
||||
{'z', encodePath, false},
|
||||
{'A', encodePath, false},
|
||||
{'Z', encodePath, false},
|
||||
{'0', encodePath, false},
|
||||
{'9', encodePath, false},
|
||||
{'-', encodePath, false},
|
||||
{'-', encodeUserPassword, false},
|
||||
{'-', encodeQueryComponent, false},
|
||||
{'-', encodeFragment, false},
|
||||
{'.', encodePath, false},
|
||||
{'_', encodePath, false},
|
||||
{'~', encodePath, false},
|
||||
|
||||
// User information (§3.2.1)
|
||||
{':', encodeUserPassword, true},
|
||||
{'/', encodeUserPassword, true},
|
||||
{'?', encodeUserPassword, true},
|
||||
{'@', encodeUserPassword, true},
|
||||
{'$', encodeUserPassword, false},
|
||||
{'&', encodeUserPassword, false},
|
||||
{'+', encodeUserPassword, false},
|
||||
{',', encodeUserPassword, false},
|
||||
{';', encodeUserPassword, false},
|
||||
{'=', encodeUserPassword, false},
|
||||
}
|
||||
|
||||
func TestShouldEscape(t *testing.T) {
|
||||
for _, tt := range shouldEscapeTests {
|
||||
if shouldEscape(tt.in, tt.mode) != tt.escape {
|
||||
t.Errorf("shouldEscape(%q, %v) returned %v; expected %v", tt.in, tt.mode, !tt.escape, tt.escape)
|
||||
}
|
||||
}
|
||||
}
|
35 vendor/github.com/armon/go-proxyproto/protocol.go generated vendored
|
@ -3,6 +3,7 @@ package proxyproto
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
@ -18,8 +19,24 @@ var (
|
|||
// to check if this connection is using the proxy protocol
|
||||
prefix = []byte("PROXY ")
|
||||
prefixLen = len(prefix)
|
||||
|
||||
ErrInvalidUpstream = errors.New("upstream connection address not trusted for PROXY information")
|
||||
)
|
||||
|
||||
// SourceChecker can be used to decide whether to trust the PROXY info or pass
|
||||
// the original connection address through. If set, the connecting address is
|
||||
// passed in as an argument. If the function returns an error due to the source
|
||||
// being disallowed, it should return ErrInvalidUpstream.
|
||||
//
|
||||
// Behavior is as follows:
|
||||
// * If error is not nil, the call to Accept() will fail. If the reason for
|
||||
// triggering this failure is due to a disallowed source, it should return
|
||||
// ErrInvalidUpstream.
|
||||
// * If bool is true, the PROXY-set address is used.
|
||||
// * If bool is false, the connection's remote address is used, rather than the
|
||||
// address claimed in the PROXY info.
|
||||
type SourceChecker func(net.Addr) (bool, error)
|
||||
|
||||
// Listener is used to wrap an underlying listener,
|
||||
// whose connections may be using the HAProxy Proxy Protocol (version 1).
|
||||
// If the connection is using the protocol, the RemoteAddr() will return
|
||||
|
@ -30,6 +47,7 @@ var (
|
|||
type Listener struct {
|
||||
Listener net.Listener
|
||||
ProxyHeaderTimeout time.Duration
|
||||
SourceCheck SourceChecker
|
||||
}
|
||||
|
||||
// Conn is used to wrap an underlying connection which
|
||||
|
@ -40,6 +58,7 @@ type Conn struct {
|
|||
conn net.Conn
|
||||
dstAddr *net.TCPAddr
|
||||
srcAddr *net.TCPAddr
|
||||
useConnRemoteAddr bool
|
||||
once sync.Once
|
||||
proxyHeaderTimeout time.Duration
|
||||
}
|
||||
|
@ -51,7 +70,19 @@ func (p *Listener) Accept() (net.Conn, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewConn(conn, p.ProxyHeaderTimeout), nil
|
||||
var useConnRemoteAddr bool
|
||||
if p.SourceCheck != nil {
|
||||
allowed, err := p.SourceCheck(conn.RemoteAddr())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !allowed {
|
||||
useConnRemoteAddr = true
|
||||
}
|
||||
}
|
||||
newConn := NewConn(conn, p.ProxyHeaderTimeout)
|
||||
newConn.useConnRemoteAddr = useConnRemoteAddr
|
||||
return newConn, nil
|
||||
}
|
||||
|
||||
// Close closes the underlying listener.
|
||||
|
@ -114,7 +145,7 @@ func (p *Conn) RemoteAddr() net.Addr {
|
|||
p.bufReader = bufio.NewReader(p.conn)
|
||||
}
|
||||
})
|
||||
if p.srcAddr != nil {
|
||||
if p.srcAddr != nil && !p.useConnRemoteAddr {
|
||||
return p.srcAddr
|
||||
}
|
||||
return p.conn.RemoteAddr()
|
||||
|
|
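The protocol.go change above wires a SourceChecker into Accept: an error aborts the accept, and a false result makes RemoteAddr fall back to the connection's real address instead of the PROXY-supplied one. The sketch below is not from this repository; it shows one plausible way to trust only a load-balancer range, and the address, port, and CIDR are illustrative.

```go
package main

import (
	"log"
	"net"

	proxyproto "github.com/armon/go-proxyproto"
)

func main() {
	// Example trusted range: only connections from here may set PROXY info.
	_, trusted, err := net.ParseCIDR("10.0.0.0/8")
	if err != nil {
		log.Fatal(err)
	}

	inner, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal(err)
	}

	ln := &proxyproto.Listener{
		Listener: inner,
		SourceCheck: func(addr net.Addr) (bool, error) {
			tcp, ok := addr.(*net.TCPAddr)
			if !ok {
				return false, nil // untrusted: keep the real remote address
			}
			return trusted.Contains(tcp.IP), nil
		},
	}

	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Println("accept:", err)
			continue
		}
		// RemoteAddr() is the PROXY-supplied address only for trusted sources.
		log.Println("client:", conn.RemoteAddr())
		conn.Close()
	}
}
```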
383 vendor/github.com/armon/go-proxyproto/protocol_test.go generated vendored Normal file
|
@ -0,0 +1,383 @@
|
|||
package proxyproto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
goodAddr = "127.0.0.1"
|
||||
badAddr = "127.0.0.2"
|
||||
errAddr = "9999.0.0.2"
|
||||
)
|
||||
|
||||
var (
|
||||
checkAddr string
|
||||
)
|
||||
|
||||
func TestPassthrough(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
pl := &Listener{Listener: l}
|
||||
|
||||
go func() {
|
||||
conn, err := net.Dial("tcp", pl.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
conn.Write([]byte("ping"))
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("pong")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
}()
|
||||
|
||||
conn, err := pl.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("ping")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
|
||||
if _, err := conn.Write([]byte("pong")); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeout(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
clientWriteDelay := 200 * time.Millisecond
|
||||
proxyHeaderTimeout := 50 * time.Millisecond
|
||||
pl := &Listener{Listener: l, ProxyHeaderTimeout: proxyHeaderTimeout}
|
||||
|
||||
go func() {
|
||||
conn, err := net.Dial("tcp", pl.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Do not send data for a while
|
||||
time.Sleep(clientWriteDelay)
|
||||
|
||||
conn.Write([]byte("ping"))
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("pong")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
}()
|
||||
|
||||
conn, err := pl.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Check the remote addr is the original 127.0.0.1
|
||||
remoteAddrStartTime := time.Now()
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
if addr.IP.String() != "127.0.0.1" {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
remoteAddrDuration := time.Since(remoteAddrStartTime)
|
||||
|
||||
// Check RemoteAddr() call did timeout
|
||||
if remoteAddrDuration >= clientWriteDelay {
|
||||
t.Fatalf("RemoteAddr() took longer than the specified timeout: %v < %v", proxyHeaderTimeout, remoteAddrDuration)
|
||||
}
|
||||
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("ping")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
|
||||
if _, err := conn.Write([]byte("pong")); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParse_ipv4(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
pl := &Listener{Listener: l}
|
||||
|
||||
go func() {
|
||||
conn, err := net.Dial("tcp", pl.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Write out the header!
|
||||
header := "PROXY TCP4 10.1.1.1 20.2.2.2 1000 2000\r\n"
|
||||
conn.Write([]byte(header))
|
||||
|
||||
conn.Write([]byte("ping"))
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("pong")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
}()
|
||||
|
||||
conn, err := pl.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("ping")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
|
||||
if _, err := conn.Write([]byte("pong")); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check the remote addr
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
if addr.IP.String() != "10.1.1.1" {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
if addr.Port != 1000 {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParse_ipv6(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
pl := &Listener{Listener: l}
|
||||
|
||||
go func() {
|
||||
conn, err := net.Dial("tcp", pl.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Write out the header!
|
||||
header := "PROXY TCP6 ffff::ffff ffff::ffff 1000 2000\r\n"
|
||||
conn.Write([]byte(header))
|
||||
|
||||
conn.Write([]byte("ping"))
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("pong")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
}()
|
||||
|
||||
conn, err := pl.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("ping")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
|
||||
if _, err := conn.Write([]byte("pong")); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check the remote addr
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
if addr.IP.String() != "ffff::ffff" {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
if addr.Port != 1000 {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParse_BadHeader(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
pl := &Listener{Listener: l}
|
||||
|
||||
go func() {
|
||||
conn, err := net.Dial("tcp", pl.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Write out the header!
|
||||
header := "PROXY TCP4 what 127.0.0.1 1000 2000\r\n"
|
||||
conn.Write([]byte(header))
|
||||
|
||||
conn.Write([]byte("ping"))
|
||||
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err == nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
conn, err := pl.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Check the remote addr, should be the local addr
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
if addr.IP.String() != "127.0.0.1" {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
|
||||
// Read should fail
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err == nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParse_ipv4_checkfunc(t *testing.T) {
|
||||
checkAddr = goodAddr
|
||||
testParse_ipv4_checkfunc(t)
|
||||
checkAddr = badAddr
|
||||
testParse_ipv4_checkfunc(t)
|
||||
checkAddr = errAddr
|
||||
testParse_ipv4_checkfunc(t)
|
||||
}
|
||||
|
||||
func testParse_ipv4_checkfunc(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
checkFunc := func(addr net.Addr) (bool, error) {
|
||||
tcpAddr := addr.(*net.TCPAddr)
|
||||
if tcpAddr.IP.String() == checkAddr {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
pl := &Listener{Listener: l, SourceCheck: checkFunc}
|
||||
|
||||
go func() {
|
||||
conn, err := net.Dial("tcp", pl.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Write out the header!
|
||||
header := "PROXY TCP4 10.1.1.1 20.2.2.2 1000 2000\r\n"
|
||||
conn.Write([]byte(header))
|
||||
|
||||
conn.Write([]byte("ping"))
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("pong")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
}()
|
||||
|
||||
conn, err := pl.Accept()
|
||||
if err != nil {
|
||||
if checkAddr == badAddr {
|
||||
return
|
||||
}
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
recv := make([]byte, 4)
|
||||
_, err = conn.Read(recv)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !bytes.Equal(recv, []byte("ping")) {
|
||||
t.Fatalf("bad: %v", recv)
|
||||
}
|
||||
|
||||
if _, err := conn.Write([]byte("pong")); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Check the remote addr
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
switch checkAddr {
|
||||
case goodAddr:
|
||||
if addr.IP.String() != "10.1.1.1" {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
if addr.Port != 1000 {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
case badAddr:
|
||||
if addr.IP.String() != "127.0.0.1" {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
if addr.Port == 1000 {
|
||||
t.Fatalf("bad: %v", addr)
|
||||
}
|
||||
}
|
||||
}
|
2
vendor/github.com/beorn7/perks/.gitignore
generated
vendored
Normal file
|
@@ -0,0 +1,2 @@
|
|||
*.test
|
||||
*.prof
|
31
vendor/github.com/beorn7/perks/README.md
generated
vendored
Normal file
|
@@ -0,0 +1,31 @@
|
|||
# Perks for Go (golang.org)
|
||||
|
||||
Perks contains the Go package quantile that computes approximate quantiles over
|
||||
an unbounded data stream within low memory and CPU bounds.
|
||||
|
||||
For more information and examples, see:
|
||||
http://godoc.org/github.com/bmizerany/perks
|
||||
|
||||
A very special thank you and shout out to Graham Cormode (Rutgers University),
|
||||
Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and
|
||||
Divesh Srivastava (AT&T Labs–Research) for their research and publication of
|
||||
[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf)
|
||||
|
||||
Thank you, also:
|
||||
* Armon Dadgar (@armon)
|
||||
* Andrew Gerrand (@nf)
|
||||
* Brad Fitzpatrick (@bradfitz)
|
||||
* Keith Rarick (@kr)
|
||||
|
||||
FAQ:
|
||||
|
||||
Q: Why not move the quantile package into the project root?
|
||||
A: I want to add more packages to perks later.
|
||||
|
||||
Copyright (C) 2013 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
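As a quick complement to the README above, a minimal sketch of the quantile API; it mirrors the vendored example_test.go further down in this diff, and the targets and input distribution are arbitrary:

```Go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the 50th and 99th percentiles, each with its own error tolerance.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.0001,
	})
	for i := 0; i < 100000; i++ {
		q.Insert(rand.ExpFloat64())
	}
	fmt.Println("p50:", q.Query(0.50))
	fmt.Println("p99:", q.Query(0.99))
	fmt.Println("inserted:", q.Count())
}
```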
26
vendor/github.com/beorn7/perks/histogram/bench_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,26 @@
|
|||
package histogram
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkInsert10Bins(b *testing.B) {
|
||||
b.StopTimer()
|
||||
h := New(10)
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
f := rand.ExpFloat64()
|
||||
h.Insert(f)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkInsert100Bins(b *testing.B) {
|
||||
b.StopTimer()
|
||||
h := New(100)
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
f := rand.ExpFloat64()
|
||||
h.Insert(f)
|
||||
}
|
||||
}
|
108
vendor/github.com/beorn7/perks/histogram/histogram.go
generated
vendored
Normal file
|
@@ -0,0 +1,108 @@
|
|||
// Package histogram provides a Go implementation of BigML's histogram package
|
||||
// for Clojure/Java. It is currently experimental.
|
||||
package histogram
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type Bin struct {
|
||||
Count int
|
||||
Sum float64
|
||||
}
|
||||
|
||||
func (b *Bin) Update(x *Bin) {
|
||||
b.Count += x.Count
|
||||
b.Sum += x.Sum
|
||||
}
|
||||
|
||||
func (b *Bin) Mean() float64 {
|
||||
return b.Sum / float64(b.Count)
|
||||
}
|
||||
|
||||
type Bins []*Bin
|
||||
|
||||
func (bs Bins) Len() int { return len(bs) }
|
||||
func (bs Bins) Less(i, j int) bool { return bs[i].Mean() < bs[j].Mean() }
|
||||
func (bs Bins) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
|
||||
|
||||
func (bs *Bins) Push(x interface{}) {
|
||||
*bs = append(*bs, x.(*Bin))
|
||||
}
|
||||
|
||||
func (bs *Bins) Pop() interface{} {
|
||||
return bs.remove(len(*bs) - 1)
|
||||
}
|
||||
|
||||
func (bs *Bins) remove(n int) *Bin {
|
||||
if n < 0 || len(*bs) < n {
|
||||
return nil
|
||||
}
|
||||
x := (*bs)[n]
|
||||
*bs = append((*bs)[:n], (*bs)[n+1:]...)
|
||||
return x
|
||||
}
|
||||
|
||||
type Histogram struct {
|
||||
res *reservoir
|
||||
}
|
||||
|
||||
func New(maxBins int) *Histogram {
|
||||
return &Histogram{res: newReservoir(maxBins)}
|
||||
}
|
||||
|
||||
func (h *Histogram) Insert(f float64) {
|
||||
h.res.insert(&Bin{1, f})
|
||||
h.res.compress()
|
||||
}
|
||||
|
||||
func (h *Histogram) Bins() Bins {
|
||||
return h.res.bins
|
||||
}
|
||||
|
||||
type reservoir struct {
|
||||
n int
|
||||
maxBins int
|
||||
bins Bins
|
||||
}
|
||||
|
||||
func newReservoir(maxBins int) *reservoir {
|
||||
return &reservoir{maxBins: maxBins}
|
||||
}
|
||||
|
||||
func (r *reservoir) insert(bin *Bin) {
|
||||
r.n += bin.Count
|
||||
i := sort.Search(len(r.bins), func(i int) bool {
|
||||
return r.bins[i].Mean() >= bin.Mean()
|
||||
})
|
||||
if i < 0 || i == r.bins.Len() {
|
||||
// TODO(blake): Maybe use an .insert(i, bin) instead of
|
||||
// performing the extra work of a heap.Push.
|
||||
heap.Push(&r.bins, bin)
|
||||
return
|
||||
}
|
||||
r.bins[i].Update(bin)
|
||||
}
|
||||
|
||||
func (r *reservoir) compress() {
|
||||
for r.bins.Len() > r.maxBins {
|
||||
minGapIndex := -1
|
||||
minGap := math.MaxFloat64
|
||||
for i := 0; i < r.bins.Len()-1; i++ {
|
||||
gap := gapWeight(r.bins[i], r.bins[i+1])
|
||||
if minGap > gap {
|
||||
minGap = gap
|
||||
minGapIndex = i
|
||||
}
|
||||
}
|
||||
prev := r.bins[minGapIndex]
|
||||
next := r.bins.remove(minGapIndex + 1)
|
||||
prev.Update(next)
|
||||
}
|
||||
}
|
||||
|
||||
func gapWeight(prev, next *Bin) float64 {
|
||||
return next.Mean() - prev.Mean()
|
||||
}
|
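The package comment above only says this is a port of BigML's streaming histogram; a minimal usage sketch of the New/Insert/Bins API added in this file follows (the bin cap and input values are arbitrary):

```Go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/histogram"
)

func main() {
	// Cap the histogram at 8 bins; Insert merges the two closest bins
	// (see reservoir.compress above) whenever the cap is exceeded.
	h := histogram.New(8)
	for i := 0; i < 10000; i++ {
		h.Insert(rand.NormFloat64())
	}
	for _, b := range h.Bins() {
		fmt.Printf("mean=%.3f count=%d\n", b.Mean(), b.Count)
	}
}
```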
38
vendor/github.com/beorn7/perks/histogram/histogram_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,38 @@
|
|||
package histogram
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHistogram(t *testing.T) {
|
||||
const numPoints = 1e6
|
||||
const maxBins = 3
|
||||
|
||||
h := New(maxBins)
|
||||
for i := 0; i < numPoints; i++ {
|
||||
f := rand.ExpFloat64()
|
||||
h.Insert(f)
|
||||
}
|
||||
|
||||
bins := h.Bins()
|
||||
if g := len(bins); g > maxBins {
|
||||
t.Fatalf("got %d bins, wanted <= %d", g, maxBins)
|
||||
}
|
||||
|
||||
for _, b := range bins {
|
||||
t.Logf("%+v", b)
|
||||
}
|
||||
|
||||
if g := count(h.Bins()); g != numPoints {
|
||||
t.Fatalf("binned %d points, wanted %d", g, numPoints)
|
||||
}
|
||||
}
|
||||
|
||||
func count(bins Bins) int {
|
||||
binCounts := 0
|
||||
for _, b := range bins {
|
||||
binCounts += b.Count
|
||||
}
|
||||
return binCounts
|
||||
}
|
63
vendor/github.com/beorn7/perks/quantile/bench_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,63 @@
|
|||
package quantile
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkInsertTargeted(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
|
||||
s := NewTargeted(Targets)
|
||||
b.ResetTimer()
|
||||
for i := float64(0); i < float64(b.N); i++ {
|
||||
s.Insert(i)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
|
||||
s := NewTargeted(TargetsSmallEpsilon)
|
||||
b.ResetTimer()
|
||||
for i := float64(0); i < float64(b.N); i++ {
|
||||
s.Insert(i)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkInsertBiased(b *testing.B) {
|
||||
s := NewLowBiased(0.01)
|
||||
b.ResetTimer()
|
||||
for i := float64(0); i < float64(b.N); i++ {
|
||||
s.Insert(i)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
|
||||
s := NewLowBiased(0.0001)
|
||||
b.ResetTimer()
|
||||
for i := float64(0); i < float64(b.N); i++ {
|
||||
s.Insert(i)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkQuery(b *testing.B) {
|
||||
s := NewTargeted(Targets)
|
||||
for i := float64(0); i < 1e6; i++ {
|
||||
s.Insert(i)
|
||||
}
|
||||
b.ResetTimer()
|
||||
n := float64(b.N)
|
||||
for i := float64(0); i < n; i++ {
|
||||
s.Query(i / n)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkQuerySmallEpsilon(b *testing.B) {
|
||||
s := NewTargeted(TargetsSmallEpsilon)
|
||||
for i := float64(0); i < 1e6; i++ {
|
||||
s.Insert(i)
|
||||
}
|
||||
b.ResetTimer()
|
||||
n := float64(b.N)
|
||||
for i := float64(0); i < n; i++ {
|
||||
s.Query(i / n)
|
||||
}
|
||||
}
|
121
vendor/github.com/beorn7/perks/quantile/example_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,121 @@
|
|||
// +build go1.1
|
||||
|
||||
package quantile_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/beorn7/perks/quantile"
|
||||
)
|
||||
|
||||
func Example_simple() {
|
||||
ch := make(chan float64)
|
||||
go sendFloats(ch)
|
||||
|
||||
// Compute the 50th, 90th, and 99th percentile.
|
||||
q := quantile.NewTargeted(map[float64]float64{
|
||||
0.50: 0.005,
|
||||
0.90: 0.001,
|
||||
0.99: 0.0001,
|
||||
})
|
||||
for v := range ch {
|
||||
q.Insert(v)
|
||||
}
|
||||
|
||||
fmt.Println("perc50:", q.Query(0.50))
|
||||
fmt.Println("perc90:", q.Query(0.90))
|
||||
fmt.Println("perc99:", q.Query(0.99))
|
||||
fmt.Println("count:", q.Count())
|
||||
// Output:
|
||||
// perc50: 5
|
||||
// perc90: 16
|
||||
// perc99: 223
|
||||
// count: 2388
|
||||
}
|
||||
|
||||
func Example_mergeMultipleStreams() {
|
||||
// Scenario:
|
||||
// We have multiple database shards. On each shard, there is a process
|
||||
// collecting query response times from the database logs and inserting
|
||||
// them into a Stream (created via NewTargeted(0.90)), much like the
|
||||
// Simple example. These processes expose a network interface for us to
|
||||
// ask them to serialize and send us the results of their
|
||||
// Stream.Samples so we may Merge and Query them.
|
||||
//
|
||||
// NOTES:
|
||||
// * These sample sets are small, allowing us to get them
|
||||
// across the network much faster than sending the entire list of data
|
||||
// points.
|
||||
//
|
||||
// * For this to work correctly, we must supply the same quantiles
|
||||
// a priori that the process collecting the samples supplied to NewTargeted,
|
||||
// even if we do not plan to query them all here.
|
||||
ch := make(chan quantile.Samples)
|
||||
getDBQuerySamples(ch)
|
||||
q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
|
||||
for samples := range ch {
|
||||
q.Merge(samples)
|
||||
}
|
||||
fmt.Println("perc90:", q.Query(0.90))
|
||||
}
|
||||
|
||||
func Example_window() {
|
||||
// Scenario: We want the 90th, 95th, and 99th percentiles for each
|
||||
// minute.
|
||||
|
||||
ch := make(chan float64)
|
||||
go sendStreamValues(ch)
|
||||
|
||||
tick := time.NewTicker(1 * time.Minute)
|
||||
q := quantile.NewTargeted(map[float64]float64{
|
||||
0.90: 0.001,
|
||||
0.95: 0.0005,
|
||||
0.99: 0.0001,
|
||||
})
|
||||
for {
|
||||
select {
|
||||
case t := <-tick.C:
|
||||
flushToDB(t, q.Samples())
|
||||
q.Reset()
|
||||
case v := <-ch:
|
||||
q.Insert(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sendStreamValues(ch chan float64) {
|
||||
// Use your imagination
|
||||
}
|
||||
|
||||
func flushToDB(t time.Time, samples quantile.Samples) {
|
||||
// Use your imagination
|
||||
}
|
||||
|
||||
// This is a stub for the above example. In reality this would hit the remote
|
||||
// servers via http or something like it.
|
||||
func getDBQuerySamples(ch chan quantile.Samples) {}
|
||||
|
||||
func sendFloats(ch chan<- float64) {
|
||||
f, err := os.Open("exampledata.txt")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
sc := bufio.NewScanner(f)
|
||||
for sc.Scan() {
|
||||
b := sc.Bytes()
|
||||
v, err := strconv.ParseFloat(string(b), 64)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
ch <- v
|
||||
}
|
||||
if sc.Err() != nil {
|
||||
log.Fatal(sc.Err())
|
||||
}
|
||||
close(ch)
|
||||
}
|
2
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
|
@@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
|
|||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(float64(l) * q)
|
||||
i := int(math.Ceil(float64(l) * q))
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
|
|
215
vendor/github.com/beorn7/perks/quantile/stream_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,215 @@
|
|||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
Targets = map[float64]float64{
|
||||
0.01: 0.001,
|
||||
0.10: 0.01,
|
||||
0.50: 0.05,
|
||||
0.90: 0.01,
|
||||
0.99: 0.001,
|
||||
}
|
||||
TargetsSmallEpsilon = map[float64]float64{
|
||||
0.01: 0.0001,
|
||||
0.10: 0.001,
|
||||
0.50: 0.005,
|
||||
0.90: 0.001,
|
||||
0.99: 0.0001,
|
||||
}
|
||||
LowQuantiles = []float64{0.01, 0.1, 0.5}
|
||||
HighQuantiles = []float64{0.99, 0.9, 0.5}
|
||||
)
|
||||
|
||||
const RelativeEpsilon = 0.01
|
||||
|
||||
func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
|
||||
sort.Float64s(a)
|
||||
for quantile, epsilon := range Targets {
|
||||
n := float64(len(a))
|
||||
k := int(quantile * n)
|
||||
if k < 1 {
|
||||
k = 1
|
||||
}
|
||||
lower := int((quantile - epsilon) * n)
|
||||
if lower < 1 {
|
||||
lower = 1
|
||||
}
|
||||
upper := int(math.Ceil((quantile + epsilon) * n))
|
||||
if upper > len(a) {
|
||||
upper = len(a)
|
||||
}
|
||||
w, min, max := a[k-1], a[lower-1], a[upper-1]
|
||||
if g := s.Query(quantile); g < min || g > max {
|
||||
t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
|
||||
sort.Float64s(a)
|
||||
for _, qu := range LowQuantiles {
|
||||
n := float64(len(a))
|
||||
k := int(qu * n)
|
||||
|
||||
lowerRank := int((1 - RelativeEpsilon) * qu * n)
|
||||
upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
|
||||
w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
|
||||
if g := s.Query(qu); g < min || g > max {
|
||||
t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
|
||||
sort.Float64s(a)
|
||||
for _, qu := range HighQuantiles {
|
||||
n := float64(len(a))
|
||||
k := int(qu * n)
|
||||
|
||||
lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
|
||||
upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
|
||||
w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
|
||||
if g := s.Query(qu); g < min || g > max {
|
||||
t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func populateStream(s *Stream) []float64 {
|
||||
a := make([]float64, 0, 1e5+100)
|
||||
for i := 0; i < cap(a); i++ {
|
||||
v := rand.NormFloat64()
|
||||
// Add 5% asymmetric outliers.
|
||||
if i%20 == 0 {
|
||||
v = v*v + 1
|
||||
}
|
||||
s.Insert(v)
|
||||
a = append(a, v)
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func TestTargetedQuery(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
s := NewTargeted(Targets)
|
||||
a := populateStream(s)
|
||||
verifyPercsWithAbsoluteEpsilon(t, a, s)
|
||||
}
|
||||
|
||||
func TestTargetedQuerySmallSampleSize(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
s := NewTargeted(TargetsSmallEpsilon)
|
||||
a := []float64{1, 2, 3, 4, 5}
|
||||
for _, v := range a {
|
||||
s.Insert(v)
|
||||
}
|
||||
verifyPercsWithAbsoluteEpsilon(t, a, s)
|
||||
// If not yet flushed, results should be precise:
|
||||
if !s.flushed() {
|
||||
for φ, want := range map[float64]float64{
|
||||
0.01: 1,
|
||||
0.10: 1,
|
||||
0.50: 3,
|
||||
0.90: 5,
|
||||
0.99: 5,
|
||||
} {
|
||||
if got := s.Query(φ); got != want {
|
||||
t.Errorf("want %f for φ=%f, got %f", want, φ, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLowBiasedQuery(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
s := NewLowBiased(RelativeEpsilon)
|
||||
a := populateStream(s)
|
||||
verifyLowPercsWithRelativeEpsilon(t, a, s)
|
||||
}
|
||||
|
||||
func TestHighBiasedQuery(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
s := NewHighBiased(RelativeEpsilon)
|
||||
a := populateStream(s)
|
||||
verifyHighPercsWithRelativeEpsilon(t, a, s)
|
||||
}
|
||||
|
||||
// BrokenTestTargetedMerge is broken, see Merge doc comment.
|
||||
func BrokenTestTargetedMerge(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
s1 := NewTargeted(Targets)
|
||||
s2 := NewTargeted(Targets)
|
||||
a := populateStream(s1)
|
||||
a = append(a, populateStream(s2)...)
|
||||
s1.Merge(s2.Samples())
|
||||
verifyPercsWithAbsoluteEpsilon(t, a, s1)
|
||||
}
|
||||
|
||||
// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
|
||||
func BrokenTestLowBiasedMerge(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
s1 := NewLowBiased(RelativeEpsilon)
|
||||
s2 := NewLowBiased(RelativeEpsilon)
|
||||
a := populateStream(s1)
|
||||
a = append(a, populateStream(s2)...)
|
||||
s1.Merge(s2.Samples())
|
||||
verifyLowPercsWithRelativeEpsilon(t, a, s2)
|
||||
}
|
||||
|
||||
// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
|
||||
func BrokenTestHighBiasedMerge(t *testing.T) {
|
||||
rand.Seed(42)
|
||||
s1 := NewHighBiased(RelativeEpsilon)
|
||||
s2 := NewHighBiased(RelativeEpsilon)
|
||||
a := populateStream(s1)
|
||||
a = append(a, populateStream(s2)...)
|
||||
s1.Merge(s2.Samples())
|
||||
verifyHighPercsWithRelativeEpsilon(t, a, s2)
|
||||
}
|
||||
|
||||
func TestUncompressed(t *testing.T) {
|
||||
q := NewTargeted(Targets)
|
||||
for i := 100; i > 0; i-- {
|
||||
q.Insert(float64(i))
|
||||
}
|
||||
if g := q.Count(); g != 100 {
|
||||
t.Errorf("want count 100, got %d", g)
|
||||
}
|
||||
// Before compression, Query should have 100% accuracy.
|
||||
for quantile := range Targets {
|
||||
w := quantile * 100
|
||||
if g := q.Query(quantile); g != w {
|
||||
t.Errorf("want %f, got %f", w, g)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUncompressedSamples(t *testing.T) {
|
||||
q := NewTargeted(map[float64]float64{0.99: 0.001})
|
||||
for i := 1; i <= 100; i++ {
|
||||
q.Insert(float64(i))
|
||||
}
|
||||
if g := q.Samples().Len(); g != 100 {
|
||||
t.Errorf("want count 100, got %d", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUncompressedOne(t *testing.T) {
|
||||
q := NewTargeted(map[float64]float64{0.99: 0.01})
|
||||
q.Insert(3.14)
|
||||
if g := q.Query(0.90); g != 3.14 {
|
||||
t.Error("want PI, got", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaults(t *testing.T) {
|
||||
if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
|
||||
t.Errorf("want 0, got %f", g)
|
||||
}
|
||||
}
|
90
vendor/github.com/beorn7/perks/topk/topk.go
generated
vendored
Normal file
|
@@ -0,0 +1,90 @@
|
|||
package topk
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// http://www.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf
|
||||
|
||||
type Element struct {
|
||||
Value string
|
||||
Count int
|
||||
}
|
||||
|
||||
type Samples []*Element
|
||||
|
||||
func (sm Samples) Len() int {
|
||||
return len(sm)
|
||||
}
|
||||
|
||||
func (sm Samples) Less(i, j int) bool {
|
||||
return sm[i].Count < sm[j].Count
|
||||
}
|
||||
|
||||
func (sm Samples) Swap(i, j int) {
|
||||
sm[i], sm[j] = sm[j], sm[i]
|
||||
}
|
||||
|
||||
type Stream struct {
|
||||
k int
|
||||
mon map[string]*Element
|
||||
|
||||
// the minimum Element
|
||||
min *Element
|
||||
}
|
||||
|
||||
func New(k int) *Stream {
|
||||
s := new(Stream)
|
||||
s.k = k
|
||||
s.mon = make(map[string]*Element)
|
||||
s.min = &Element{}
|
||||
|
||||
// Track k+1 so that less frequent items contend for that spot,
|
||||
// resulting in k being more accurate.
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Stream) Insert(x string) {
|
||||
s.insert(&Element{x, 1})
|
||||
}
|
||||
|
||||
func (s *Stream) Merge(sm Samples) {
|
||||
for _, e := range sm {
|
||||
s.insert(e)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) insert(in *Element) {
|
||||
e := s.mon[in.Value]
|
||||
if e != nil {
|
||||
e.Count++
|
||||
} else {
|
||||
if len(s.mon) < s.k+1 {
|
||||
e = &Element{in.Value, in.Count}
|
||||
s.mon[in.Value] = e
|
||||
} else {
|
||||
e = s.min
|
||||
delete(s.mon, e.Value)
|
||||
e.Value = in.Value
|
||||
e.Count += in.Count
|
||||
s.min = e
|
||||
}
|
||||
}
|
||||
if e.Count < s.min.Count {
|
||||
s.min = e
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) Query() Samples {
|
||||
var sm Samples
|
||||
for _, e := range s.mon {
|
||||
sm = append(sm, e)
|
||||
}
|
||||
sort.Sort(sort.Reverse(sm))
|
||||
|
||||
if len(sm) < s.k {
|
||||
return sm
|
||||
}
|
||||
|
||||
return sm[:s.k]
|
||||
}
|
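The comment in New above notes that k+1 counters are tracked so that the k reported elements stay more accurate; a minimal sketch of using this file's API (the input strings are invented):

```Go
package main

import (
	"fmt"

	"github.com/beorn7/perks/topk"
)

func main() {
	// Keep (approximate) counters for the 3 most frequent strings.
	s := topk.New(3)
	for _, v := range []string{"a", "b", "a", "c", "a", "b", "d"} {
		s.Insert(v)
	}
	// Query returns at most k elements, most frequent first.
	for _, e := range s.Query() {
		fmt.Printf("%s: %d\n", e.Value, e.Count)
	}
}
```

Counts can overestimate once more than k+1 distinct values have been seen, because the smallest counter is recycled for each new value, as the insert method above shows.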
57
vendor/github.com/beorn7/perks/topk/topk_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,57 @@
|
|||
package topk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestTopK(t *testing.T) {
|
||||
stream := New(10)
|
||||
ss := []*Stream{New(10), New(10), New(10)}
|
||||
m := make(map[string]int)
|
||||
for _, s := range ss {
|
||||
for i := 0; i < 1e6; i++ {
|
||||
v := fmt.Sprintf("%x", int8(rand.ExpFloat64()))
|
||||
s.Insert(v)
|
||||
m[v]++
|
||||
}
|
||||
stream.Merge(s.Query())
|
||||
}
|
||||
|
||||
var sm Samples
|
||||
for x, s := range m {
|
||||
sm = append(sm, &Element{x, s})
|
||||
}
|
||||
sort.Sort(sort.Reverse(sm))
|
||||
|
||||
g := stream.Query()
|
||||
if len(g) != 10 {
|
||||
t.Fatalf("got %d, want 10", len(g))
|
||||
}
|
||||
for i, e := range g {
|
||||
if sm[i].Value != e.Value {
|
||||
t.Errorf("at %d: want %q, got %q", i, sm[i].Value, e.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuery(t *testing.T) {
|
||||
queryTests := []struct {
|
||||
value string
|
||||
expected int
|
||||
}{
|
||||
{"a", 1},
|
||||
{"b", 2},
|
||||
{"c", 2},
|
||||
}
|
||||
|
||||
stream := New(2)
|
||||
for _, tt := range queryTests {
|
||||
stream.Insert(tt.value)
|
||||
if n := len(stream.Query()); n != tt.expected {
|
||||
t.Errorf("want %d, got %d", tt.expected, n)
|
||||
}
|
||||
}
|
||||
}
|
14
vendor/github.com/davecgh/go-spew/.travis.yml
generated
vendored
Normal file
|
@@ -0,0 +1,14 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.5.4
|
||||
- 1.6.3
|
||||
- 1.7
|
||||
install:
|
||||
- go get -v golang.org/x/tools/cmd/cover
|
||||
script:
|
||||
- go test -v -tags=safe ./spew
|
||||
- go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
|
||||
after_success:
|
||||
- go get -v github.com/mattn/goveralls
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
- goveralls -coverprofile=profile.cov -service=travis-ci
|
205
vendor/github.com/davecgh/go-spew/README.md
generated
vendored
Normal file
|
@@ -0,0 +1,205 @@
|
|||
go-spew
|
||||
=======
|
||||
|
||||
[Build Status](https://travis-ci.org/davecgh/go-spew) [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
|
||||
|
||||
|
||||
Go-spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging. A comprehensive suite of tests with 100% test coverage is provided
|
||||
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
|
||||
report. Go-spew is licensed under the liberal ISC license, so it may be used in
|
||||
open source or commercial projects.
|
||||
|
||||
If you're interested in reading about how this package came to life and some
|
||||
of the challenges involved in providing a deep pretty printer, there is a blog
|
||||
post about it
|
||||
[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
|
||||
|
||||
## Documentation
|
||||
|
||||
[GoDoc](http://godoc.org/github.com/davecgh/go-spew/spew)
|
||||
|
||||
Full `go doc` style documentation for the project can be viewed online without
|
||||
installing this package by using the excellent GoDoc site here:
|
||||
http://godoc.org/github.com/davecgh/go-spew/spew
|
||||
|
||||
You can also view the documentation locally once the package is installed with
|
||||
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
|
||||
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/davecgh/go-spew/spew
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
Add this import line to the file you're working in:
|
||||
|
||||
```Go
|
||||
import "github.com/davecgh/go-spew/spew"
|
||||
```
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
|
||||
```Go
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
```
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
|
||||
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
|
||||
and pointer addresses):
|
||||
|
||||
```Go
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
```
|
||||
|
||||
## Debugging a Web Application Example
|
||||
|
||||
Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
|
||||
|
||||
```Go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
|
||||
fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
|
||||
}
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", handler)
|
||||
http.ListenAndServe(":8080", nil)
|
||||
}
|
||||
```
|
||||
|
||||
## Sample Dump Output
|
||||
|
||||
```
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) {
|
||||
(string) "one": (bool) true
|
||||
}
|
||||
}
|
||||
([]uint8) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
```
|
||||
|
||||
## Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
```
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
```
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
```
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available via the
|
||||
spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
```
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables. This option
|
||||
relies on access to the unsafe package, so it will not have any effect when
|
||||
running in environments without access to the unsafe package such as Google
|
||||
App Engine or with the "safe" build tag specified.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of capacities
|
||||
for arrays, slices, maps and channels. This is useful when diffing data
|
||||
structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are supported,
|
||||
with other types sorted according to the reflect.Value.String() output
|
||||
which guarantees display stability. Natural map order is used by
|
||||
default.
|
||||
|
||||
* SpewKeys
|
||||
SpewKeys specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only considered
|
||||
if SortKeys is true.
|
||||
|
||||
```
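For illustration, a dedicated ConfigState can be configured once and reused instead of mutating the spew.Config global; the option values below are arbitrary examples:

```Go
// Create a reusable ConfigState rather than changing the global spew.Config.
scs := spew.ConfigState{Indent: "\t", MaxDepth: 3, SortKeys: true}
scs.Dump(myVar1)
str := scs.Sdump(myVar2)
```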
|
||||
|
||||
## Unsafe Package Dependency
|
||||
|
||||
This package relies on the unsafe package to perform some of the more advanced
|
||||
features, however it also supports a "limited" mode which allows it to work in
|
||||
environments where the unsafe package is not available. By default, it will
|
||||
operate in this mode on Google App Engine and when compiled with GopherJS. The
|
||||
"safe" build tag may also be specified to force the package to build without
|
||||
using the unsafe package.
|
||||
|
||||
## License
|
||||
|
||||
Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
|
22
vendor/github.com/davecgh/go-spew/cov_report.sh
generated
vendored
Normal file
|
@@ -0,0 +1,22 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script uses gocov to generate a test coverage report.
|
||||
# The gocov tool may be obtained with the following command:
|
||||
# go get github.com/axw/gocov/gocov
|
||||
#
|
||||
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
|
||||
|
||||
# Check for gocov.
|
||||
if ! type gocov >/dev/null 2>&1; then
|
||||
echo >&2 "This script requires the gocov tool."
|
||||
echo >&2 "You may obtain it with the following command:"
|
||||
echo >&2 "go get github.com/axw/gocov/gocov"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Only run the cgo tests if gcc is installed.
|
||||
if type gcc >/dev/null 2>&1; then
|
||||
(cd spew && gocov test -tags testcgo | gocov report)
|
||||
else
|
||||
(cd spew && gocov test | gocov report)
|
||||
fi
|
2
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
|
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
|||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
|
|
298
vendor/github.com/davecgh/go-spew/spew/common_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,298 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// custom type to test Stringer interface on non-pointer receiver.
|
||||
type stringer string
|
||||
|
||||
// String implements the Stringer interface for testing invocation of custom
|
||||
// stringers on types with non-pointer receivers.
|
||||
func (s stringer) String() string {
|
||||
return "stringer " + string(s)
|
||||
}
|
||||
|
||||
// custom type to test Stringer interface on pointer receiver.
|
||||
type pstringer string
|
||||
|
||||
// String implements the Stringer interface for testing invocation of custom
|
||||
// stringers on types with only pointer receivers.
|
||||
func (s *pstringer) String() string {
|
||||
return "stringer " + string(*s)
|
||||
}
|
||||
|
||||
// xref1 and xref2 are cross referencing structs for testing circular reference
|
||||
// detection.
|
||||
type xref1 struct {
|
||||
ps2 *xref2
|
||||
}
|
||||
type xref2 struct {
|
||||
ps1 *xref1
|
||||
}
|
||||
|
||||
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
|
||||
// reference for testing detection.
|
||||
type indirCir1 struct {
|
||||
ps2 *indirCir2
|
||||
}
|
||||
type indirCir2 struct {
|
||||
ps3 *indirCir3
|
||||
}
|
||||
type indirCir3 struct {
|
||||
ps1 *indirCir1
|
||||
}
|
||||
|
||||
// embed is used to test embedded structures.
|
||||
type embed struct {
|
||||
a string
|
||||
}
|
||||
|
||||
// embedwrap is used to test embedded structures.
|
||||
type embedwrap struct {
|
||||
*embed
|
||||
e *embed
|
||||
}
|
||||
|
||||
// panicer is used to intentionally cause a panic for testing spew properly
|
||||
// handles them.
|
||||
type panicer int
|
||||
|
||||
func (p panicer) String() string {
|
||||
panic("test panic")
|
||||
}
|
||||
|
||||
// customError is used to test custom error interface invocation.
|
||||
type customError int
|
||||
|
||||
func (e customError) Error() string {
|
||||
return fmt.Sprintf("error: %d", int(e))
|
||||
}
|
||||
|
||||
// stringizeWants converts a slice of wanted test output into a format suitable
|
||||
// for a test error message.
|
||||
func stringizeWants(wants []string) string {
|
||||
s := ""
|
||||
for i, want := range wants {
|
||||
if i > 0 {
|
||||
s += fmt.Sprintf("want%d: %s", i+1, want)
|
||||
} else {
|
||||
s += "want: " + want
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// testFailed returns whether or not a test failed by checking if the result
|
||||
// of the test is in the slice of wanted strings.
|
||||
func testFailed(result string, wants []string) bool {
|
||||
for _, want := range wants {
|
||||
if result == want {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type sortableStruct struct {
|
||||
x int
|
||||
}
|
||||
|
||||
func (ss sortableStruct) String() string {
|
||||
return fmt.Sprintf("ss.%d", ss.x)
|
||||
}
|
||||
|
||||
type unsortableStruct struct {
|
||||
x int
|
||||
}
|
||||
|
||||
type sortTestCase struct {
|
||||
input []reflect.Value
|
||||
expected []reflect.Value
|
||||
}
|
||||
|
||||
func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
|
||||
getInterfaces := func(values []reflect.Value) []interface{} {
|
||||
interfaces := []interface{}{}
|
||||
for _, v := range values {
|
||||
interfaces = append(interfaces, v.Interface())
|
||||
}
|
||||
return interfaces
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
spew.SortValues(test.input, cs)
|
||||
// reflect.DeepEqual cannot really make sense of reflect.Value,
|
||||
// probably because of all the pointer tricks. For instance,
|
||||
// v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{}
|
||||
// instead.
|
||||
input := getInterfaces(test.input)
|
||||
expected := getInterfaces(test.expected)
|
||||
if !reflect.DeepEqual(input, expected) {
|
||||
t.Errorf("Sort mismatch:\n %v != %v", input, expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestSortValues ensures the sort functionality for reflect.Value based sorting
|
||||
// works as intended.
|
||||
func TestSortValues(t *testing.T) {
|
||||
v := reflect.ValueOf
|
||||
|
||||
a := v("a")
|
||||
b := v("b")
|
||||
c := v("c")
|
||||
embedA := v(embed{"a"})
|
||||
embedB := v(embed{"b"})
|
||||
embedC := v(embed{"c"})
|
||||
tests := []sortTestCase{
|
||||
// No values.
|
||||
{
|
||||
[]reflect.Value{},
|
||||
[]reflect.Value{},
|
||||
},
|
||||
// Bools.
|
||||
{
|
||||
[]reflect.Value{v(false), v(true), v(false)},
|
||||
[]reflect.Value{v(false), v(false), v(true)},
|
||||
},
|
||||
// Ints.
|
||||
{
|
||||
[]reflect.Value{v(2), v(1), v(3)},
|
||||
[]reflect.Value{v(1), v(2), v(3)},
|
||||
},
|
||||
// Uints.
|
||||
{
|
||||
[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
|
||||
[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
|
||||
},
|
||||
// Floats.
|
||||
{
|
||||
[]reflect.Value{v(2.0), v(1.0), v(3.0)},
|
||||
[]reflect.Value{v(1.0), v(2.0), v(3.0)},
|
||||
},
|
||||
// Strings.
|
||||
{
|
||||
[]reflect.Value{b, a, c},
|
||||
[]reflect.Value{a, b, c},
|
||||
},
|
||||
// Array
|
||||
{
|
||||
[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
|
||||
[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
|
||||
},
|
||||
// Uintptrs.
|
||||
{
|
||||
[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
|
||||
[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
|
||||
},
|
||||
// SortableStructs.
|
||||
{
|
||||
// Note: not sorted - DisableMethods is set.
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
},
|
||||
// UnsortableStructs.
|
||||
{
|
||||
// Note: not sorted - SpewKeys is false.
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
},
|
||||
// Invalid.
|
||||
{
|
||||
[]reflect.Value{embedB, embedA, embedC},
|
||||
[]reflect.Value{embedB, embedA, embedC},
|
||||
},
|
||||
}
|
||||
cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
|
||||
helpTestSortValues(tests, &cs, t)
|
||||
}
|
||||
|
||||
// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
|
||||
// based sorting works as intended when using string methods.
|
||||
func TestSortValuesWithMethods(t *testing.T) {
|
||||
v := reflect.ValueOf
|
||||
|
||||
a := v("a")
|
||||
b := v("b")
|
||||
c := v("c")
|
||||
tests := []sortTestCase{
|
||||
// Ints.
|
||||
{
|
||||
[]reflect.Value{v(2), v(1), v(3)},
|
||||
[]reflect.Value{v(1), v(2), v(3)},
|
||||
},
|
||||
// Strings.
|
||||
{
|
||||
[]reflect.Value{b, a, c},
|
||||
[]reflect.Value{a, b, c},
|
||||
},
|
||||
// SortableStructs.
|
||||
{
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
|
||||
},
|
||||
// UnsortableStructs.
|
||||
{
|
||||
// Note: not sorted - SpewKeys is false.
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
},
|
||||
}
|
||||
cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
|
||||
helpTestSortValues(tests, &cs, t)
|
||||
}
|
||||
|
||||
// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
|
||||
// based sorting works as intended when using spew to stringify keys.
|
||||
func TestSortValuesWithSpew(t *testing.T) {
|
||||
v := reflect.ValueOf
|
||||
|
||||
a := v("a")
|
||||
b := v("b")
|
||||
c := v("c")
|
||||
tests := []sortTestCase{
|
||||
// Ints.
|
||||
{
|
||||
[]reflect.Value{v(2), v(1), v(3)},
|
||||
[]reflect.Value{v(1), v(2), v(3)},
|
||||
},
|
||||
// Strings.
|
||||
{
|
||||
[]reflect.Value{b, a, c},
|
||||
[]reflect.Value{a, b, c},
|
||||
},
|
||||
// SortableStructs.
|
||||
{
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
|
||||
},
|
||||
// UnsortableStructs.
|
||||
{
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
|
||||
},
|
||||
}
|
||||
cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
|
||||
helpTestSortValues(tests, &cs, t)
|
||||
}
|
1042
vendor/github.com/davecgh/go-spew/spew/dump_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
99
vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,99 @@
|
|||
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when both cgo is supported and "-tags testcgo" is added to the go test
|
||||
// command line. This means the cgo tests are only added (and hence run) when
|
||||
// specifically requested. This configuration is used because spew itself
|
||||
// does not require cgo to run even though it does handle certain cgo types
|
||||
// specially. Rather than forcing all clients to require cgo and an external
|
||||
// C compiler just to run the tests, this scheme makes them optional.
|
||||
// +build cgo,testcgo
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/davecgh/go-spew/spew/testdata"
|
||||
)
|
||||
|
||||
func addCgoDumpTests() {
|
||||
// C char pointer.
|
||||
v := testdata.GetCgoCharPointer()
|
||||
nv := testdata.GetCgoNullCharPointer()
|
||||
pv := &v
|
||||
vcAddr := fmt.Sprintf("%p", v)
|
||||
vAddr := fmt.Sprintf("%p", pv)
|
||||
pvAddr := fmt.Sprintf("%p", &pv)
|
||||
vt := "*testdata._Ctype_char"
|
||||
vs := "116"
|
||||
addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
|
||||
addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
|
||||
addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
|
||||
addDumpTest(nv, "("+vt+")(<nil>)\n")
|
||||
|
||||
// C char array.
|
||||
v2, v2l, v2c := testdata.GetCgoCharArray()
|
||||
v2Len := fmt.Sprintf("%d", v2l)
|
||||
v2Cap := fmt.Sprintf("%d", v2c)
|
||||
v2t := "[6]testdata._Ctype_char"
|
||||
v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 32 00 " +
|
||||
" |test2.|\n}"
|
||||
addDumpTest(v2, "("+v2t+") "+v2s+"\n")
|
||||
|
||||
// C unsigned char array.
|
||||
v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
|
||||
v3Len := fmt.Sprintf("%d", v3l)
|
||||
v3Cap := fmt.Sprintf("%d", v3c)
|
||||
v3t := "[6]testdata._Ctype_unsignedchar"
|
||||
v3t2 := "[6]testdata._Ctype_uchar"
|
||||
v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 33 00 " +
|
||||
" |test3.|\n}"
|
||||
addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
|
||||
|
||||
// C signed char array.
|
||||
v4, v4l, v4c := testdata.GetCgoSignedCharArray()
|
||||
v4Len := fmt.Sprintf("%d", v4l)
|
||||
v4Cap := fmt.Sprintf("%d", v4c)
|
||||
v4t := "[6]testdata._Ctype_schar"
|
||||
v4t2 := "testdata._Ctype_schar"
|
||||
v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
|
||||
"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
|
||||
") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
|
||||
") 0\n}"
|
||||
addDumpTest(v4, "("+v4t+") "+v4s+"\n")
|
||||
|
||||
// C uint8_t array.
|
||||
v5, v5l, v5c := testdata.GetCgoUint8tArray()
|
||||
v5Len := fmt.Sprintf("%d", v5l)
|
||||
v5Cap := fmt.Sprintf("%d", v5c)
|
||||
v5t := "[6]testdata._Ctype_uint8_t"
|
||||
v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 35 00 " +
|
||||
" |test5.|\n}"
|
||||
addDumpTest(v5, "("+v5t+") "+v5s+"\n")
|
||||
|
||||
// C typedefed unsigned char array.
|
||||
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
|
||||
v6Len := fmt.Sprintf("%d", v6l)
|
||||
v6Cap := fmt.Sprintf("%d", v6c)
|
||||
v6t := "[6]testdata._Ctype_custom_uchar_t"
|
||||
v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 36 00 " +
|
||||
" |test6.|\n}"
|
||||
addDumpTest(v6, "("+v6t+") "+v6s+"\n")
|
||||
}
|
26
vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,26 @@
|
|||
// Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when either cgo is not supported or "-tags testcgo" is not added to the go
|
||||
// test command line. This file intentionally does not setup any cgo tests in
|
||||
// this scenario.
|
||||
// +build !cgo !testcgo
|
||||
|
||||
package spew_test
|
||||
|
||||
func addCgoDumpTests() {
|
||||
// Don't add any tests for cgo since this file is only compiled when
|
||||
// there should not be any cgo tests.
|
||||
}
226 vendor/github.com/davecgh/go-spew/spew/example_test.go generated vendored Normal file
@@ -0,0 +1,226 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
type Flag int
|
||||
|
||||
const (
|
||||
flagOne Flag = iota
|
||||
flagTwo
|
||||
)
|
||||
|
||||
var flagStrings = map[Flag]string{
|
||||
flagOne: "flagOne",
|
||||
flagTwo: "flagTwo",
|
||||
}
|
||||
|
||||
func (f Flag) String() string {
|
||||
if s, ok := flagStrings[f]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("Unknown flag (%d)", int(f))
|
||||
}
|
||||
|
||||
type Bar struct {
|
||||
data uintptr
|
||||
}
|
||||
|
||||
type Foo struct {
|
||||
unexportedField Bar
|
||||
ExportedField map[interface{}]interface{}
|
||||
}
|
||||
|
||||
// This example demonstrates how to use Dump to dump variables to stdout.
|
||||
func ExampleDump() {
|
||||
// The following package level declarations are assumed for this example:
|
||||
/*
|
||||
type Flag int
|
||||
|
||||
const (
|
||||
flagOne Flag = iota
|
||||
flagTwo
|
||||
)
|
||||
|
||||
var flagStrings = map[Flag]string{
|
||||
flagOne: "flagOne",
|
||||
flagTwo: "flagTwo",
|
||||
}
|
||||
|
||||
func (f Flag) String() string {
|
||||
if s, ok := flagStrings[f]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("Unknown flag (%d)", int(f))
|
||||
}
|
||||
|
||||
type Bar struct {
|
||||
data uintptr
|
||||
}
|
||||
|
||||
type Foo struct {
|
||||
unexportedField Bar
|
||||
ExportedField map[interface{}]interface{}
|
||||
}
|
||||
*/
|
||||
|
||||
// Setup some sample data structures for the example.
|
||||
bar := Bar{uintptr(0)}
|
||||
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
|
||||
f := Flag(5)
|
||||
b := []byte{
|
||||
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
|
||||
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
|
||||
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
|
||||
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
|
||||
0x31, 0x32,
|
||||
}
|
||||
|
||||
// Dump!
|
||||
spew.Dump(s1, f, b)
|
||||
|
||||
// Output:
|
||||
// (spew_test.Foo) {
|
||||
// unexportedField: (spew_test.Bar) {
|
||||
// data: (uintptr) <nil>
|
||||
// },
|
||||
// ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
// (string) (len=3) "one": (bool) true
|
||||
// }
|
||||
// }
|
||||
// (spew_test.Flag) Unknown flag (5)
|
||||
// ([]uint8) (len=34 cap=34) {
|
||||
// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
// 00000020 31 32 |12|
|
||||
// }
|
||||
//
|
||||
}
|
||||
|
||||
// This example demonstrates how to use Printf to display a variable with a
|
||||
// format string and inline formatting.
|
||||
func ExamplePrintf() {
|
||||
// Create a double pointer to a uint 8.
|
||||
ui8 := uint8(5)
|
||||
pui8 := &ui8
|
||||
ppui8 := &pui8
|
||||
|
||||
// Create a circular data type.
|
||||
type circular struct {
|
||||
ui8 uint8
|
||||
c *circular
|
||||
}
|
||||
c := circular{ui8: 1}
|
||||
c.c = &c
|
||||
|
||||
// Print!
|
||||
spew.Printf("ppui8: %v\n", ppui8)
|
||||
spew.Printf("circular: %v\n", c)
|
||||
|
||||
// Output:
|
||||
// ppui8: <**>5
|
||||
// circular: {1 <*>{1 <*><shown>}}
|
||||
}
|
||||
|
||||
// This example demonstrates how to use a ConfigState.
|
||||
func ExampleConfigState() {
|
||||
// Modify the indent level of the ConfigState only. The global
|
||||
// configuration is not modified.
|
||||
scs := spew.ConfigState{Indent: "\t"}
|
||||
|
||||
// Output using the ConfigState instance.
|
||||
v := map[string]int{"one": 1}
|
||||
scs.Printf("v: %v\n", v)
|
||||
scs.Dump(v)
|
||||
|
||||
// Output:
|
||||
// v: map[one:1]
|
||||
// (map[string]int) (len=1) {
|
||||
// (string) (len=3) "one": (int) 1
|
||||
// }
|
||||
}
|
||||
|
||||
// This example demonstrates how to use ConfigState.Dump to dump variables to
|
||||
// stdout
|
||||
func ExampleConfigState_Dump() {
|
||||
// See the top-level Dump example for details on the types used in this
|
||||
// example.
|
||||
|
||||
// Create two ConfigState instances with different indentation.
|
||||
scs := spew.ConfigState{Indent: "\t"}
|
||||
scs2 := spew.ConfigState{Indent: " "}
|
||||
|
||||
// Setup some sample data structures for the example.
|
||||
bar := Bar{uintptr(0)}
|
||||
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
|
||||
|
||||
// Dump using the ConfigState instances.
|
||||
scs.Dump(s1)
|
||||
scs2.Dump(s1)
|
||||
|
||||
// Output:
|
||||
// (spew_test.Foo) {
|
||||
// unexportedField: (spew_test.Bar) {
|
||||
// data: (uintptr) <nil>
|
||||
// },
|
||||
// ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
// (string) (len=3) "one": (bool) true
|
||||
// }
|
||||
// }
|
||||
// (spew_test.Foo) {
|
||||
// unexportedField: (spew_test.Bar) {
|
||||
// data: (uintptr) <nil>
|
||||
// },
|
||||
// ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
// (string) (len=3) "one": (bool) true
|
||||
// }
|
||||
// }
|
||||
//
|
||||
}
|
||||
|
||||
// This example demonstrates how to use ConfigState.Printf to display a variable
|
||||
// with a format string and inline formatting.
|
||||
func ExampleConfigState_Printf() {
|
||||
// See the top-level Dump example for details on the types used in this
|
||||
// example.
|
||||
|
||||
// Create two ConfigState instances and modify the method handling of the
|
||||
// first ConfigState only.
|
||||
scs := spew.NewDefaultConfig()
|
||||
scs2 := spew.NewDefaultConfig()
|
||||
scs.DisableMethods = true
|
||||
|
||||
// Alternatively
|
||||
// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
|
||||
// scs2 := spew.ConfigState{Indent: " "}
|
||||
|
||||
// This is of type Flag which implements a Stringer and has raw value 1.
|
||||
f := flagTwo
|
||||
|
||||
// Dump using the ConfigState instances.
|
||||
scs.Printf("f: %v\n", f)
|
||||
scs2.Printf("f: %v\n", f)
|
||||
|
||||
// Output:
|
||||
// f: 1
|
||||
// f: flagTwo
|
||||
}
1558 vendor/github.com/davecgh/go-spew/spew/format_test.go generated vendored Normal file
File diff suppressed because it is too large
87 vendor/github.com/davecgh/go-spew/spew/internal_test.go generated vendored Normal file
@@ -0,0 +1,87 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
This test file is part of the spew package rather than the spew_test
|
||||
package because it needs access to internals to properly test certain cases
|
||||
which are not possible via the public interface since they should never happen.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// dummyFmtState implements a fake fmt.State to use for testing invalid
|
||||
// reflect.Value handling. This is necessary because the fmt package catches
|
||||
// invalid values before invoking the formatter on them.
|
||||
type dummyFmtState struct {
|
||||
bytes.Buffer
|
||||
}
|
||||
|
||||
func (dfs *dummyFmtState) Flag(f int) bool {
|
||||
if f == int('+') {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (dfs *dummyFmtState) Precision() (int, bool) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (dfs *dummyFmtState) Width() (int, bool) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// TestInvalidReflectValue ensures the dump and formatter code handles an
|
||||
// invalid reflect value properly. This needs access to internal state since it
|
||||
// should never happen in real code and therefore can't be tested via the public
|
||||
// API.
|
||||
func TestInvalidReflectValue(t *testing.T) {
|
||||
i := 1
|
||||
|
||||
// Dump invalid reflect value.
|
||||
v := new(reflect.Value)
|
||||
buf := new(bytes.Buffer)
|
||||
d := dumpState{w: buf, cs: &Config}
|
||||
d.dump(*v)
|
||||
s := buf.String()
|
||||
want := "<invalid>"
|
||||
if s != want {
|
||||
t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Formatter invalid reflect value.
|
||||
buf2 := new(dummyFmtState)
|
||||
f := formatState{value: *v, cs: &Config, fs: buf2}
|
||||
f.format(*v)
|
||||
s = buf2.String()
|
||||
want = "<invalid>"
|
||||
if s != want {
|
||||
t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
|
||||
}
|
||||
}
|
||||
|
||||
// SortValues makes the internal sortValues function available to the test
|
||||
// package.
|
||||
func SortValues(values []reflect.Value, cs *ConfigState) {
|
||||
sortValues(values, cs)
|
||||
}
102 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go generated vendored Normal file
@@ -0,0 +1,102 @@
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build !js,!appengine,!safe,!disableunsafe
|
||||
|
||||
/*
|
||||
This test file is part of the spew package rather than the spew_test
|
||||
package because it needs access to internals to properly test certain cases
|
||||
which are not possible via the public interface since they should never happen.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
|
||||
// the maximum kind value which does not exist. This is needed to test the
|
||||
// fallback code which punts to the standard fmt library for new types that
|
||||
// might get added to the language.
|
||||
func changeKind(v *reflect.Value, readOnly bool) {
|
||||
rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
|
||||
*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if readOnly {
|
||||
*rvf |= flagRO
|
||||
} else {
|
||||
*rvf &= ^uintptr(flagRO)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddedReflectValue tests functionality of the dump and formatter code which
|
||||
// falls back to the standard fmt library for new types that might get added to
|
||||
// the language.
|
||||
func TestAddedReflectValue(t *testing.T) {
|
||||
i := 1
|
||||
|
||||
// Dump using a reflect.Value that is exported.
|
||||
v := reflect.ValueOf(int8(5))
|
||||
changeKind(&v, false)
|
||||
buf := new(bytes.Buffer)
|
||||
d := dumpState{w: buf, cs: &Config}
|
||||
d.dump(v)
|
||||
s := buf.String()
|
||||
want := "(int8) 5"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Dump using a reflect.Value that is not exported.
|
||||
changeKind(&v, true)
|
||||
buf.Reset()
|
||||
d.dump(v)
|
||||
s = buf.String()
|
||||
want = "(int8) <int8 Value>"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Formatter using a reflect.Value that is exported.
|
||||
changeKind(&v, false)
|
||||
buf2 := new(dummyFmtState)
|
||||
f := formatState{value: v, cs: &Config, fs: buf2}
|
||||
f.format(v)
|
||||
s = buf2.String()
|
||||
want = "5"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Formatter using a reflect.Value that is not exported.
|
||||
changeKind(&v, true)
|
||||
buf2.Reset()
|
||||
f = formatState{value: v, cs: &Config, fs: buf2}
|
||||
f.format(v)
|
||||
s = buf2.String()
|
||||
want = "<int8 Value>"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
|
||||
}
|
||||
}
320 vendor/github.com/davecgh/go-spew/spew/spew_test.go generated vendored Normal file
@@ -0,0 +1,320 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// spewFunc is used to identify which public function of the spew package or
|
||||
// ConfigState a test applies to.
|
||||
type spewFunc int
|
||||
|
||||
const (
|
||||
fCSFdump spewFunc = iota
|
||||
fCSFprint
|
||||
fCSFprintf
|
||||
fCSFprintln
|
||||
fCSPrint
|
||||
fCSPrintln
|
||||
fCSSdump
|
||||
fCSSprint
|
||||
fCSSprintf
|
||||
fCSSprintln
|
||||
fCSErrorf
|
||||
fCSNewFormatter
|
||||
fErrorf
|
||||
fFprint
|
||||
fFprintln
|
||||
fPrint
|
||||
fPrintln
|
||||
fSdump
|
||||
fSprint
|
||||
fSprintf
|
||||
fSprintln
|
||||
)
|
||||
|
||||
// Map of spewFunc values to names for pretty printing.
|
||||
var spewFuncStrings = map[spewFunc]string{
|
||||
fCSFdump: "ConfigState.Fdump",
|
||||
fCSFprint: "ConfigState.Fprint",
|
||||
fCSFprintf: "ConfigState.Fprintf",
|
||||
fCSFprintln: "ConfigState.Fprintln",
|
||||
fCSSdump: "ConfigState.Sdump",
|
||||
fCSPrint: "ConfigState.Print",
|
||||
fCSPrintln: "ConfigState.Println",
|
||||
fCSSprint: "ConfigState.Sprint",
|
||||
fCSSprintf: "ConfigState.Sprintf",
|
||||
fCSSprintln: "ConfigState.Sprintln",
|
||||
fCSErrorf: "ConfigState.Errorf",
|
||||
fCSNewFormatter: "ConfigState.NewFormatter",
|
||||
fErrorf: "spew.Errorf",
|
||||
fFprint: "spew.Fprint",
|
||||
fFprintln: "spew.Fprintln",
|
||||
fPrint: "spew.Print",
|
||||
fPrintln: "spew.Println",
|
||||
fSdump: "spew.Sdump",
|
||||
fSprint: "spew.Sprint",
|
||||
fSprintf: "spew.Sprintf",
|
||||
fSprintln: "spew.Sprintln",
|
||||
}
|
||||
|
||||
func (f spewFunc) String() string {
|
||||
if s, ok := spewFuncStrings[f]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
|
||||
}
|
||||
|
||||
// spewTest is used to describe a test to be performed against the public
|
||||
// functions of the spew package or ConfigState.
|
||||
type spewTest struct {
|
||||
cs *spew.ConfigState
|
||||
f spewFunc
|
||||
format string
|
||||
in interface{}
|
||||
want string
|
||||
}
|
||||
|
||||
// spewTests houses the tests to be performed against the public functions of
|
||||
// the spew package and ConfigState.
|
||||
//
|
||||
// These tests are only intended to ensure the public functions are exercised
|
||||
// and are intentionally not exhaustive of types. The exhaustive type
|
||||
// tests are handled in the dump and format tests.
|
||||
var spewTests []spewTest
|
||||
|
||||
// redirStdout is a helper function to return the standard output from f as a
|
||||
// byte slice.
|
||||
func redirStdout(f func()) ([]byte, error) {
|
||||
tempFile, err := ioutil.TempFile("", "ss-test")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fileName := tempFile.Name()
|
||||
defer os.Remove(fileName) // Ignore error
|
||||
|
||||
origStdout := os.Stdout
|
||||
os.Stdout = tempFile
|
||||
f()
|
||||
os.Stdout = origStdout
|
||||
tempFile.Close()
|
||||
|
||||
return ioutil.ReadFile(fileName)
|
||||
}
|
||||
|
||||
func initSpewTests() {
|
||||
// Config states with various settings.
|
||||
scsDefault := spew.NewDefaultConfig()
|
||||
scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
|
||||
scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
|
||||
scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
|
||||
scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
|
||||
scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
|
||||
scsNoCap := &spew.ConfigState{DisableCapacities: true}
|
||||
|
||||
// Variables for tests on types which implement Stringer interface with and
|
||||
// without a pointer receiver.
|
||||
ts := stringer("test")
|
||||
tps := pstringer("test")
|
||||
|
||||
type ptrTester struct {
|
||||
s *struct{}
|
||||
}
|
||||
tptr := &ptrTester{s: &struct{}{}}
|
||||
|
||||
// depthTester is used to test max depth handling for structs, array, slices
|
||||
// and maps.
|
||||
type depthTester struct {
|
||||
ic indirCir1
|
||||
arr [1]string
|
||||
slice []string
|
||||
m map[string]int
|
||||
}
|
||||
dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
|
||||
map[string]int{"one": 1}}
|
||||
|
||||
// Variable for tests on types which implement error interface.
|
||||
te := customError(10)
|
||||
|
||||
spewTests = []spewTest{
|
||||
{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
|
||||
{scsDefault, fCSFprint, "", int16(32767), "32767"},
|
||||
{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
|
||||
{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
|
||||
{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
|
||||
{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
|
||||
{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
|
||||
{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
|
||||
{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
|
||||
{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
|
||||
{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
|
||||
{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
|
||||
{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
|
||||
{scsDefault, fFprint, "", float32(3.14), "3.14"},
|
||||
{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
|
||||
{scsDefault, fPrint, "", true, "true"},
|
||||
{scsDefault, fPrintln, "", false, "false\n"},
|
||||
{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
|
||||
{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
|
||||
{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
|
||||
{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
|
||||
{scsNoMethods, fCSFprint, "", ts, "test"},
|
||||
{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
|
||||
{scsNoMethods, fCSFprint, "", tps, "test"},
|
||||
{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
|
||||
{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
|
||||
{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
|
||||
{scsNoPmethods, fCSFprint, "", tps, "test"},
|
||||
{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
|
||||
{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
|
||||
{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
|
||||
" ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" +
|
||||
" arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
|
||||
" slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
|
||||
" m: (map[string]int) (len=1) {\n <max depth reached>\n }\n}\n"},
|
||||
{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
|
||||
{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
|
||||
"(len=4) (stringer test) \"test\"\n"},
|
||||
{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
|
||||
{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
|
||||
"(error: 10) 10\n"},
|
||||
{scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
|
||||
{scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
|
||||
{scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
|
||||
{scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
|
||||
}
|
||||
}
|
||||
|
||||
// TestSpew executes all of the tests described by spewTests.
|
||||
func TestSpew(t *testing.T) {
|
||||
initSpewTests()
|
||||
|
||||
t.Logf("Running %d tests", len(spewTests))
|
||||
for i, test := range spewTests {
|
||||
buf := new(bytes.Buffer)
|
||||
switch test.f {
|
||||
case fCSFdump:
|
||||
test.cs.Fdump(buf, test.in)
|
||||
|
||||
case fCSFprint:
|
||||
test.cs.Fprint(buf, test.in)
|
||||
|
||||
case fCSFprintf:
|
||||
test.cs.Fprintf(buf, test.format, test.in)
|
||||
|
||||
case fCSFprintln:
|
||||
test.cs.Fprintln(buf, test.in)
|
||||
|
||||
case fCSPrint:
|
||||
b, err := redirStdout(func() { test.cs.Print(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fCSPrintln:
|
||||
b, err := redirStdout(func() { test.cs.Println(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fCSSdump:
|
||||
str := test.cs.Sdump(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSSprint:
|
||||
str := test.cs.Sprint(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSSprintf:
|
||||
str := test.cs.Sprintf(test.format, test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSSprintln:
|
||||
str := test.cs.Sprintln(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSErrorf:
|
||||
err := test.cs.Errorf(test.format, test.in)
|
||||
buf.WriteString(err.Error())
|
||||
|
||||
case fCSNewFormatter:
|
||||
fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
|
||||
|
||||
case fErrorf:
|
||||
err := spew.Errorf(test.format, test.in)
|
||||
buf.WriteString(err.Error())
|
||||
|
||||
case fFprint:
|
||||
spew.Fprint(buf, test.in)
|
||||
|
||||
case fFprintln:
|
||||
spew.Fprintln(buf, test.in)
|
||||
|
||||
case fPrint:
|
||||
b, err := redirStdout(func() { spew.Print(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fPrintln:
|
||||
b, err := redirStdout(func() { spew.Println(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fSdump:
|
||||
str := spew.Sdump(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fSprint:
|
||||
str := spew.Sprint(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fSprintf:
|
||||
str := spew.Sprintf(test.format, test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fSprintln:
|
||||
str := spew.Sprintln(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
default:
|
||||
t.Errorf("%v #%d unrecognized function", test.f, i)
|
||||
continue
|
||||
}
|
||||
s := buf.String()
|
||||
if test.want != s {
|
||||
t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
82 vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go generated vendored Normal file
@@ -0,0 +1,82 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when both cgo is supported and "-tags testcgo" is added to the go test
|
||||
// command line. This code should really only be in the dumpcgo_test.go file,
|
||||
// but unfortunately Go will not allow cgo in test files, so this is a
|
||||
// workaround to allow cgo types to be tested. This configuration is used
|
||||
// because spew itself does not require cgo to run even though it does handle
|
||||
// certain cgo types specially. Rather than forcing all clients to require cgo
|
||||
// and an external C compiler just to run the tests, this scheme makes them
|
||||
// optional.
|
||||
// +build cgo,testcgo
|
||||
|
||||
package testdata
|
||||
|
||||
/*
|
||||
#include <stdint.h>
|
||||
typedef unsigned char custom_uchar_t;
|
||||
|
||||
char *ncp = 0;
|
||||
char *cp = "test";
|
||||
char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
|
||||
unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
|
||||
signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
|
||||
uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
|
||||
custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
|
||||
// used for tests.
|
||||
func GetCgoNullCharPointer() interface{} {
|
||||
return C.ncp
|
||||
}
|
||||
|
||||
// GetCgoCharPointer returns a char pointer via cgo. This is only used for
|
||||
// tests.
|
||||
func GetCgoCharPointer() interface{} {
|
||||
return C.cp
|
||||
}
|
||||
|
||||
// GetCgoCharArray returns a char array via cgo and the array's len and cap.
|
||||
// This is only used for tests.
|
||||
func GetCgoCharArray() (interface{}, int, int) {
|
||||
return C.ca, len(C.ca), cap(C.ca)
|
||||
}
|
||||
|
||||
// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
|
||||
// array's len and cap. This is only used for tests.
|
||||
func GetCgoUnsignedCharArray() (interface{}, int, int) {
|
||||
return C.uca, len(C.uca), cap(C.uca)
|
||||
}
|
||||
|
||||
// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
|
||||
// and cap. This is only used for tests.
|
||||
func GetCgoSignedCharArray() (interface{}, int, int) {
|
||||
return C.sca, len(C.sca), cap(C.sca)
|
||||
}
|
||||
|
||||
// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
|
||||
// cap. This is only used for tests.
|
||||
func GetCgoUint8tArray() (interface{}, int, int) {
|
||||
return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
|
||||
}
|
||||
|
||||
// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
|
||||
// cgo and the array's len and cap. This is only used for tests.
|
||||
func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
|
||||
return C.tuca, len(C.tuca), cap(C.tuca)
|
||||
}
61 vendor/github.com/davecgh/go-spew/test_coverage.txt generated vendored Normal file
@@ -0,0 +1,61 @@
|
||||
github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88)
|
||||
github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82)
|
||||
github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52)
|
||||
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44)
|
||||
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39)
|
||||
github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30)
|
||||
github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18)
|
||||
github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13)
|
||||
github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12)
|
||||
github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11)
|
||||
github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11)
|
||||
github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10)
|
||||
github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9)
|
||||
github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8)
|
||||
github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7)
|
||||
github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5)
|
||||
github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4)
|
||||
github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4)
|
||||
github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4)
|
||||
github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3)
|
||||
github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3)
|
||||
github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3)
|
||||
github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3)
|
||||
github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3)
|
||||
github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1)
|
||||
github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505)
|
||||
4 vendor/github.com/dgrijalva/jwt-go/.gitignore generated vendored
@@ -1,4 +0,0 @@
.DS_Store
bin
8 vendor/github.com/dgrijalva/jwt-go/.travis.yml generated vendored
@@ -1,8 +0,0 @@
language: go

go:
- 1.3
- 1.4
- 1.5
- 1.6
- tip
8 vendor/github.com/dgrijalva/jwt-go/LICENSE generated vendored
@@ -1,8 +0,0 @@
Copyright (c) 2012 Dave Grijalva
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
96 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md generated vendored
@@ -1,96 +0,0 @@
## Migration Guide from v2 -> v3
|
||||
|
||||
Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
|
||||
|
||||
### `Token.Claims` is now an interface type
|
||||
|
||||
The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
|
||||
|
||||
`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
|
||||
|
||||
The old example for parsing a token looked like this..
|
||||
|
||||
```go
|
||||
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
|
||||
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
is now directly mapped to...
|
||||
|
||||
```go
|
||||
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
|
||||
claims := token.Claims.(jwt.MapClaims)
|
||||
fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
|
||||
|
||||
```go
|
||||
type MyCustomClaims struct {
|
||||
User string
|
||||
*StandardClaims
|
||||
}
|
||||
|
||||
if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
|
||||
claims := token.Claims.(*MyCustomClaims)
|
||||
fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
|
||||
}
|
||||
```
|
||||
|
||||
### `ParseFromRequest` has been moved
|
||||
|
||||
To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
|
||||
|
||||
`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
|
||||
|
||||
This simple parsing example:
|
||||
|
||||
```go
|
||||
if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
|
||||
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
is directly mapped to:
|
||||
|
||||
```go
|
||||
if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
|
||||
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
There are several concrete `Extractor` types provided for your convenience:
|
||||
|
||||
* `HeaderExtractor` will search a list of headers until one contains content.
|
||||
* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
|
||||
* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
|
||||
* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
|
||||
* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument
|
||||
* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header
|
||||
|
||||
|
||||
### RSA signing methods no longer accept `[]byte` keys
|
||||
|
||||
Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
|
||||
|
||||
To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
|
||||
|
||||
```go
|
||||
func keyLookupFunc(token *jwt.Token) (interface{}, error) {
|
||||
// Don't forget to validate the alg is what you expect:
|
||||
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
|
||||
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
|
||||
}
|
||||
|
||||
// Look up key
|
||||
key, err := lookupPublicKey(token.Header["kid"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unpack key from PEM encoded PKCS8
|
||||
return jwt.ParseRSAPublicKeyFromPEM(key)
|
||||
}
|
||||
```
85 vendor/github.com/dgrijalva/jwt-go/README.md generated vendored
@@ -1,85 +0,0 @@
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
|
||||
|
||||
[](https://travis-ci.org/dgrijalva/jwt-go)
|
||||
|
||||
**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
|
||||
|
||||
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
|
||||
|
||||
|
||||
## What the heck is a JWT?
|
||||
|
||||
JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
|
||||
|
||||
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
|
||||
|
||||
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
|
||||
|
||||
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
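To make the three dot-separated parts concrete, here is a small standalone sketch (not taken from this library; the header and claim values are made up) that assembles an unsigned toy token and then decodes it again:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Assemble a toy, *unsigned* token by hand just to show the wire format:
	// base64url(header) + "." + base64url(claims) + "." + signature.
	enc := base64.RawURLEncoding
	header := enc.EncodeToString([]byte(`{"alg":"HS256","typ":"JWT"}`))
	claims := enc.EncodeToString([]byte(`{"user":"bob","exp":1500000000}`))
	token := header + "." + claims + ".no-real-signature-here"

	// Reading it back is the reverse: split on "." and decode each JSON part.
	parts := strings.Split(token, ".")
	for i, name := range []string{"header", "claims"} {
		raw, err := enc.DecodeString(parts[i])
		if err != nil {
			panic(err)
		}
		var v map[string]interface{}
		if err := json.Unmarshal(raw, &v); err != nil {
			panic(err)
		}
		fmt.Printf("%s: %v\n", name, v)
	}
}
```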
|
||||
|
||||
## What's in the box?
|
||||
|
||||
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
|
||||
|
||||
## Examples
|
||||
|
||||
See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
|
||||
|
||||
* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
|
||||
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
|
||||
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
|
||||
|
||||
## Extensions
|
||||
|
||||
This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
|
||||
|
||||
Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
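As a rough sketch of that extension point, the snippet below registers a deliberately do-nothing method. The `X-NOOP` algorithm name and the `noopMethod` type are invented for illustration and are not part of the library; a real method would compute and check an actual signature:

```go
package noopsigner

import "github.com/dgrijalva/jwt-go"

// noopMethod only exists to show the shape of the SigningMethod interface.
type noopMethod struct{}

func (m *noopMethod) Alg() string { return "X-NOOP" }

func (m *noopMethod) Sign(signingString string, key interface{}) (string, error) {
	// A real method would derive a signature from signingString and key.
	return "", nil
}

func (m *noopMethod) Verify(signingString, signature string, key interface{}) error {
	// A real method would recompute the signature and compare it to `signature`.
	return nil
}

func init() {
	// Register a factory so parsing can resolve tokens whose header says "alg": "X-NOOP".
	jwt.RegisterSigningMethod("X-NOOP", func() jwt.SigningMethod {
		return &noopMethod{}
	})
}
```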
|
||||
|
||||
## Compliance
|
||||
|
||||
This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
|
||||
|
||||
* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
|
||||
|
||||
## Project Status & Versioning
|
||||
|
||||
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
|
||||
|
||||
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
|
||||
|
||||
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package path: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
|
||||
|
||||
## Usage Tips
|
||||
|
||||
### Signing vs Encryption
|
||||
|
||||
A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
|
||||
|
||||
* The author of the token was in the possession of the signing secret
|
||||
* The data has not been modified since it was signed
|
||||
|
||||
It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
|
||||
|
||||
### Choosing a Signing Method
|
||||
|
||||
There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
|
||||
|
||||
Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
|
||||
|
||||
Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
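A minimal sketch of the practical difference, using the exported HS256 and RS256 methods (the claims, the `shared-secret` value, and the freshly generated RSA key are placeholders):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/dgrijalva/jwt-go"
)

func main() {
	claims := jwt.MapClaims{"user": "bob"}

	// Symmetric (HMAC-SHA): any []byte works as the shared secret, and the
	// same secret is needed again to verify.
	hs, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte("shared-secret"))
	if err != nil {
		panic(err)
	}
	fmt.Println("HS256:", hs)

	// Asymmetric (RSA): sign with the private key; verifiers only need the
	// public half, so the signing key never has to leave the issuer.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	rs, err := jwt.NewWithClaims(jwt.SigningMethodRS256, claims).SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println("RS256:", rs)
}
```

Either way, the verifying side supplies the matching key (the shared secret or the public key) through a `Keyfunc` when calling `Parse`.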
|
||||
|
||||
### JWT and OAuth
|
||||
|
||||
It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
|
||||
|
||||
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
|
||||
|
||||
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
|
||||
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
|
||||
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
|
||||
|
||||
## More
|
||||
|
||||
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
|
||||
|
||||
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation.
|
105 vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md generated vendored
@@ -1,105 +0,0 @@
## `jwt-go` Version History
|
||||
|
||||
#### 3.0.0
|
||||
|
||||
* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
|
||||
* Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
|
||||
* `ParseFromRequest` has been moved to `request` subpackage and usage has changed
|
||||
* The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
|
||||
* Other Additions and Changes
|
||||
* Added `Claims` interface type to allow users to decode the claims into a custom type
|
||||
* Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
|
||||
* Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
|
||||
* Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
|
||||
* Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
|
||||
* Added several new, more specific, validation errors to error type bitmask
|
||||
* Moved examples from README to executable example files
|
||||
* Signing method registry is now thread safe
|
||||
* Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
|
||||
|
||||
#### 2.7.0
|
||||
|
||||
This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
|
||||
|
||||
* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
|
||||
* Error text for expired tokens includes how long it's been expired
|
||||
* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
|
||||
* Documentation updates
|
||||
|
||||
#### 2.6.0
|
||||
|
||||
* Exposed inner error within ValidationError
|
||||
* Fixed validation errors when using UseJSONNumber flag
|
||||
* Added several unit tests
|
||||
|
||||
#### 2.5.0
|
||||
|
||||
* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
|
||||
* Updated/fixed some documentation
|
||||
* Added more helpful error message when trying to parse tokens that begin with `BEARER `
|
||||
|
||||
#### 2.4.0
|
||||
|
||||
* Added new type, Parser, to allow for configuration of various parsing parameters
|
||||
* You can now specify a list of valid signing methods. Anything outside this set will be rejected.
|
||||
* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
|
||||
* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
|
||||
* Fixed some bugs with ECDSA parsing
|
||||
|
||||
#### 2.3.0
|
||||
|
||||
* Added support for ECDSA signing methods
|
||||
* Added support for RSA PSS signing methods (requires go v1.4)
|
||||
|
||||
#### 2.2.0
|
||||
|
||||
* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
|
||||
|
||||
#### 2.1.0
|
||||
|
||||
Backwards compatible API change that was missed in 2.0.0.
|
||||
|
||||
* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
|
||||
|
||||
#### 2.0.0
|
||||
|
||||
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
|
||||
|
||||
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
|
||||
|
||||
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
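For example, a post-2.0 `Keyfunc` looks roughly like the one below; the token string and the `shared-secret` value are placeholders for illustration:

```go
package main

import (
	"fmt"

	"github.com/dgrijalva/jwt-go"
)

func main() {
	tokenString := "<some signed token>" // placeholder, not a real JWT

	// Pre-2.0 the callback returned ([]byte, error); since 2.0 it returns
	// (interface{}, error) so RSA key types fit through the same hook.
	keyFunc := func(t *jwt.Token) (interface{}, error) {
		return []byte("shared-secret"), nil
	}

	token, err := jwt.Parse(tokenString, keyFunc)
	if err != nil {
		fmt.Println("parse failed (expected with a placeholder token):", err)
		return
	}
	fmt.Println("valid:", token.Valid)
}
```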
* **Compatibility Breaking Changes**
	* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
	* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
	* `KeyFunc` now returns `interface{}` instead of `[]byte`
	* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
	* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
	* Added public package global `SigningMethodHS256`
	* Added public package global `SigningMethodHS384`
	* Added public package global `SigningMethodHS512`
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
	* Added public package global `SigningMethodRS256`
	* Added public package global `SigningMethodRS384`
	* Added public package global `SigningMethodRS512`
* Moved the sample private key for HMAC tests from an inline value to a file on disk. The value is unchanged.
* Refactored the RSA implementation to be easier to read
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`

#### 1.0.2

* Fixed a bug in parsing public keys from certificates
* Added more tests around the parsing of keys for RS256
* Code refactoring in the RS256 implementation. No functional changes.

#### 1.0.1

* Fixed a panic if the RS256 signing method was passed an invalid key

#### 1.0.0

* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
* Supports RS256 and HS256 signing methods
134 vendor/github.com/dgrijalva/jwt-go/claims.go generated vendored

@@ -1,134 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// For a type to be a Claims object, it must just have a Valid method that determines
|
||||
// if the token is invalid for any supported reason
|
||||
type Claims interface {
|
||||
Valid() error
|
||||
}
|
||||
|
||||
// Structured version of Claims Section, as referenced at
|
||||
// https://tools.ietf.org/html/rfc7519#section-4.1
|
||||
// See examples for how to use this with your own claim types
|
||||
type StandardClaims struct {
|
||||
Audience string `json:"aud,omitempty"`
|
||||
ExpiresAt int64 `json:"exp,omitempty"`
|
||||
Id string `json:"jti,omitempty"`
|
||||
IssuedAt int64 `json:"iat,omitempty"`
|
||||
Issuer string `json:"iss,omitempty"`
|
||||
NotBefore int64 `json:"nbf,omitempty"`
|
||||
Subject string `json:"sub,omitempty"`
|
||||
}
|
||||
|
||||
// Validates time based claims "exp, iat, nbf".
|
||||
// There is no accounting for clock skew.
|
||||
// As well, if any of the above claims are not in the token, it will still
|
||||
// be considered a valid claim.
|
||||
func (c StandardClaims) Valid() error {
|
||||
vErr := new(ValidationError)
|
||||
now := TimeFunc().Unix()
|
||||
|
||||
// The claims below are optional, by default, so if they are set to the
|
||||
// default value in Go, let's not fail the verification for them.
|
||||
if c.VerifyExpiresAt(now, false) == false {
|
||||
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
|
||||
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
|
||||
vErr.Errors |= ValidationErrorExpired
|
||||
}
|
||||
|
||||
if c.VerifyIssuedAt(now, false) == false {
|
||||
vErr.Inner = fmt.Errorf("Token used before issued")
|
||||
vErr.Errors |= ValidationErrorIssuedAt
|
||||
}
|
||||
|
||||
if c.VerifyNotBefore(now, false) == false {
|
||||
vErr.Inner = fmt.Errorf("token is not valid yet")
|
||||
vErr.Errors |= ValidationErrorNotValidYet
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return vErr
|
||||
}
|
||||
|
||||
// Compares the aud claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
|
||||
return verifyAud(c.Audience, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the exp claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
|
||||
return verifyExp(c.ExpiresAt, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the iat claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
|
||||
return verifyIat(c.IssuedAt, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the iss claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
|
||||
return verifyIss(c.Issuer, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the nbf claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
|
||||
return verifyNbf(c.NotBefore, cmp, req)
|
||||
}
|
||||
|
||||
// ----- helpers
|
||||
|
||||
func verifyAud(aud string, cmp string, required bool) bool {
|
||||
if aud == "" {
|
||||
return !required
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func verifyExp(exp int64, now int64, required bool) bool {
|
||||
if exp == 0 {
|
||||
return !required
|
||||
}
|
||||
return now <= exp
|
||||
}
|
||||
|
||||
func verifyIat(iat int64, now int64, required bool) bool {
|
||||
if iat == 0 {
|
||||
return !required
|
||||
}
|
||||
return now >= iat
|
||||
}
|
||||
|
||||
func verifyIss(iss string, cmp string, required bool) bool {
|
||||
if iss == "" {
|
||||
return !required
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func verifyNbf(nbf int64, now int64, required bool) bool {
|
||||
if nbf == 0 {
|
||||
return !required
|
||||
}
|
||||
return now >= nbf
|
||||
}
|
4 vendor/github.com/dgrijalva/jwt-go/doc.go generated vendored

@@ -1,4 +0,0 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt
147 vendor/github.com/dgrijalva/jwt-go/ecdsa.go generated vendored

@@ -1,147 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var (
|
||||
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
|
||||
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
|
||||
)
|
||||
|
||||
// Implements the ECDSA family of signing methods signing methods
|
||||
type SigningMethodECDSA struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
KeySize int
|
||||
CurveBits int
|
||||
}
|
||||
|
||||
// Specific instances for EC256 and company
|
||||
var (
|
||||
SigningMethodES256 *SigningMethodECDSA
|
||||
SigningMethodES384 *SigningMethodECDSA
|
||||
SigningMethodES512 *SigningMethodECDSA
|
||||
)
|
||||
|
||||
func init() {
|
||||
// ES256
|
||||
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
|
||||
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
|
||||
return SigningMethodES256
|
||||
})
|
||||
|
||||
// ES384
|
||||
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
|
||||
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
|
||||
return SigningMethodES384
|
||||
})
|
||||
|
||||
// ES512
|
||||
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
|
||||
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
|
||||
return SigningMethodES512
|
||||
})
|
||||
}
|
||||
|
||||
func (m *SigningMethodECDSA) Alg() string {
|
||||
return m.Name
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this verify method, key must be an ecdsa.PublicKey struct
|
||||
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the key
|
||||
var ecdsaKey *ecdsa.PublicKey
|
||||
switch k := key.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
ecdsaKey = k
|
||||
default:
|
||||
return ErrInvalidKeyType
|
||||
}
|
||||
|
||||
if len(sig) != 2*m.KeySize {
|
||||
return ErrECDSAVerification
|
||||
}
|
||||
|
||||
r := big.NewInt(0).SetBytes(sig[:m.KeySize])
|
||||
s := big.NewInt(0).SetBytes(sig[m.KeySize:])
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Verify the signature
|
||||
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
|
||||
return nil
|
||||
} else {
|
||||
return ErrECDSAVerification
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, key must be an ecdsa.PrivateKey struct
|
||||
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
|
||||
// Get the key
|
||||
var ecdsaKey *ecdsa.PrivateKey
|
||||
switch k := key.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
ecdsaKey = k
|
||||
default:
|
||||
return "", ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return r, s
|
||||
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
|
||||
curveBits := ecdsaKey.Curve.Params().BitSize
|
||||
|
||||
if m.CurveBits != curveBits {
|
||||
return "", ErrInvalidKey
|
||||
}
|
||||
|
||||
keyBytes := curveBits / 8
|
||||
if curveBits%8 > 0 {
|
||||
keyBytes += 1
|
||||
}
|
||||
|
||||
// We serialize the outpus (r and s) into big-endian byte arrays and pad
|
||||
// them with zeros on the left to make sure the sizes work out. Both arrays
|
||||
// must be keyBytes long, and the output must be 2*keyBytes long.
|
||||
rBytes := r.Bytes()
|
||||
rBytesPadded := make([]byte, keyBytes)
|
||||
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
|
||||
|
||||
sBytes := s.Bytes()
|
||||
sBytesPadded := make([]byte, keyBytes)
|
||||
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
|
||||
|
||||
out := append(rBytesPadded, sBytesPadded...)
|
||||
|
||||
return EncodeSegment(out), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
67 vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go generated vendored

@@ -1,67 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
|
||||
ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
|
||||
)
|
||||
|
||||
// Parse PEM encoded Elliptic Curve Private Key Structure
|
||||
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pkey *ecdsa.PrivateKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
|
||||
return nil, ErrNotECPrivateKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 public key
|
||||
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
|
||||
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
|
||||
parsedKey = cert.PublicKey
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *ecdsa.PublicKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
|
||||
return nil, ErrNotECPublicKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
63 vendor/github.com/dgrijalva/jwt-go/errors.go generated vendored

@@ -1,63 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Error constants
|
||||
var (
|
||||
ErrInvalidKey = errors.New("key is invalid")
|
||||
ErrInvalidKeyType = errors.New("key is of invalid type")
|
||||
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
|
||||
)
|
||||
|
||||
// The errors that might occur when parsing and validating a token
|
||||
const (
|
||||
ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
|
||||
ValidationErrorUnverifiable // Token could not be verified because of signing problems
|
||||
ValidationErrorSignatureInvalid // Signature validation failed
|
||||
|
||||
// Standard Claim validation errors
|
||||
ValidationErrorAudience // AUD validation failed
|
||||
ValidationErrorExpired // EXP validation failed
|
||||
ValidationErrorIssuedAt // IAT validation failed
|
||||
ValidationErrorIssuer // ISS validation failed
|
||||
ValidationErrorNotValidYet // NBF validation failed
|
||||
ValidationErrorId // JTI validation failed
|
||||
ValidationErrorClaimsInvalid // Generic claims validation error
|
||||
)
|
||||
|
||||
// Helper for constructing a ValidationError with a string error message
|
||||
func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
|
||||
return &ValidationError{
|
||||
text: errorText,
|
||||
Errors: errorFlags,
|
||||
}
|
||||
}
|
||||
|
||||
// The error from Parse if token is not valid
|
||||
type ValidationError struct {
|
||||
Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
|
||||
Errors uint32 // bitfield. see ValidationError... constants
|
||||
text string // errors that do not have a valid error just have text
|
||||
}
|
||||
|
||||
// Validation error is an error type
|
||||
func (e ValidationError) Error() string {
|
||||
if e.Inner != nil {
|
||||
return e.Inner.Error()
|
||||
} else if e.text != "" {
|
||||
return e.text
|
||||
} else {
|
||||
return "token is invalid"
|
||||
}
|
||||
return e.Inner.Error()
|
||||
}
|
||||
|
||||
// No errors
|
||||
func (e *ValidationError) valid() bool {
|
||||
if e.Errors > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
94 vendor/github.com/dgrijalva/jwt-go/hmac.go generated vendored

@@ -1,94 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/hmac"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Implements the HMAC-SHA family of signing methods signing methods
|
||||
type SigningMethodHMAC struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
// Specific instances for HS256 and company
|
||||
var (
|
||||
SigningMethodHS256 *SigningMethodHMAC
|
||||
SigningMethodHS384 *SigningMethodHMAC
|
||||
SigningMethodHS512 *SigningMethodHMAC
|
||||
ErrSignatureInvalid = errors.New("signature is invalid")
|
||||
)
|
||||
|
||||
func init() {
|
||||
// HS256
|
||||
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
|
||||
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodHS256
|
||||
})
|
||||
|
||||
// HS384
|
||||
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
|
||||
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodHS384
|
||||
})
|
||||
|
||||
// HS512
|
||||
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
|
||||
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodHS512
|
||||
})
|
||||
}
|
||||
|
||||
func (m *SigningMethodHMAC) Alg() string {
|
||||
return m.Name
|
||||
}
|
||||
|
||||
// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
|
||||
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
|
||||
// Verify the key is the right type
|
||||
keyBytes, ok := key.([]byte)
|
||||
if !ok {
|
||||
return ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Decode signature, for comparison
|
||||
sig, err := DecodeSegment(signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Can we use the specified hashing method?
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
|
||||
// This signing method is symmetric, so we validate the signature
|
||||
// by reproducing the signature from the signing string and key, then
|
||||
// comparing that against the provided signature.
|
||||
hasher := hmac.New(m.Hash.New, keyBytes)
|
||||
hasher.Write([]byte(signingString))
|
||||
if !hmac.Equal(sig, hasher.Sum(nil)) {
|
||||
return ErrSignatureInvalid
|
||||
}
|
||||
|
||||
// No validation errors. Signature is good.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod for this signing method.
|
||||
// Key must be []byte
|
||||
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
|
||||
if keyBytes, ok := key.([]byte); ok {
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := hmac.New(m.Hash.New, keyBytes)
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
return EncodeSegment(hasher.Sum(nil)), nil
|
||||
}
|
||||
|
||||
return "", ErrInvalidKey
|
||||
}
|
94 vendor/github.com/dgrijalva/jwt-go/map_claims.go generated vendored

@@ -1,94 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
// "fmt"
|
||||
)
|
||||
|
||||
// Claims type that uses the map[string]interface{} for JSON decoding
|
||||
// This is the default claims type if you don't supply one
|
||||
type MapClaims map[string]interface{}
|
||||
|
||||
// Compares the aud claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
|
||||
aud, _ := m["aud"].(string)
|
||||
return verifyAud(aud, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the exp claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
|
||||
switch exp := m["exp"].(type) {
|
||||
case float64:
|
||||
return verifyExp(int64(exp), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := exp.Int64()
|
||||
return verifyExp(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Compares the iat claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
|
||||
switch iat := m["iat"].(type) {
|
||||
case float64:
|
||||
return verifyIat(int64(iat), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := iat.Int64()
|
||||
return verifyIat(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Compares the iss claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
|
||||
iss, _ := m["iss"].(string)
|
||||
return verifyIss(iss, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the nbf claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
|
||||
switch nbf := m["nbf"].(type) {
|
||||
case float64:
|
||||
return verifyNbf(int64(nbf), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := nbf.Int64()
|
||||
return verifyNbf(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Validates time based claims "exp, iat, nbf".
|
||||
// There is no accounting for clock skew.
|
||||
// As well, if any of the above claims are not in the token, it will still
|
||||
// be considered a valid claim.
|
||||
func (m MapClaims) Valid() error {
|
||||
vErr := new(ValidationError)
|
||||
now := TimeFunc().Unix()
|
||||
|
||||
if m.VerifyExpiresAt(now, false) == false {
|
||||
vErr.Inner = errors.New("Token is expired")
|
||||
vErr.Errors |= ValidationErrorExpired
|
||||
}
|
||||
|
||||
if m.VerifyIssuedAt(now, false) == false {
|
||||
vErr.Inner = errors.New("Token used before issued")
|
||||
vErr.Errors |= ValidationErrorIssuedAt
|
||||
}
|
||||
|
||||
if m.VerifyNotBefore(now, false) == false {
|
||||
vErr.Inner = errors.New("Token is not valid yet")
|
||||
vErr.Errors |= ValidationErrorNotValidYet
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return vErr
|
||||
}
|
52 vendor/github.com/dgrijalva/jwt-go/none.go generated vendored

@@ -1,52 +0,0 @@
package jwt
|
||||
|
||||
// Implements the none signing method. This is required by the spec
|
||||
// but you probably should never use it.
|
||||
var SigningMethodNone *signingMethodNone
|
||||
|
||||
const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
|
||||
|
||||
var NoneSignatureTypeDisallowedError error
|
||||
|
||||
type signingMethodNone struct{}
|
||||
type unsafeNoneMagicConstant string
|
||||
|
||||
func init() {
|
||||
SigningMethodNone = &signingMethodNone{}
|
||||
NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
|
||||
|
||||
RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
|
||||
return SigningMethodNone
|
||||
})
|
||||
}
|
||||
|
||||
func (m *signingMethodNone) Alg() string {
|
||||
return "none"
|
||||
}
|
||||
|
||||
// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
|
||||
func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
|
||||
// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
|
||||
// accepting 'none' signing method
|
||||
if _, ok := key.(unsafeNoneMagicConstant); !ok {
|
||||
return NoneSignatureTypeDisallowedError
|
||||
}
|
||||
// If signing method is none, signature must be an empty string
|
||||
if signature != "" {
|
||||
return NewValidationError(
|
||||
"'none' signing method with non-empty signature",
|
||||
ValidationErrorSignatureInvalid,
|
||||
)
|
||||
}
|
||||
|
||||
// Accept 'none' signing method.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
|
||||
func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
|
||||
if _, ok := key.(unsafeNoneMagicConstant); ok {
|
||||
return "", nil
|
||||
}
|
||||
return "", NoneSignatureTypeDisallowedError
|
||||
}
|
131 vendor/github.com/dgrijalva/jwt-go/parser.go generated vendored

@@ -1,131 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
ValidMethods []string // If populated, only these methods will be considered valid
|
||||
UseJSONNumber bool // Use JSON Number format in JSON decoder
|
||||
SkipClaimsValidation bool // Skip claims validation during token parsing
|
||||
}
|
||||
|
||||
// Parse, validate, and return a token.
|
||||
// keyFunc will receive the parsed token and should return the key for validating.
|
||||
// If everything is kosher, err will be nil
|
||||
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
|
||||
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
|
||||
}
|
||||
|
||||
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
|
||||
parts := strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
|
||||
}
|
||||
|
||||
var err error
|
||||
token := &Token{Raw: tokenString}
|
||||
|
||||
// parse Header
|
||||
var headerBytes []byte
|
||||
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
|
||||
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
|
||||
return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
|
||||
}
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// parse Claims
|
||||
var claimBytes []byte
|
||||
token.Claims = claims
|
||||
|
||||
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
|
||||
if p.UseJSONNumber {
|
||||
dec.UseNumber()
|
||||
}
|
||||
// JSON Decode. Special case for map type to avoid weird pointer behavior
|
||||
if c, ok := token.Claims.(MapClaims); ok {
|
||||
err = dec.Decode(&c)
|
||||
} else {
|
||||
err = dec.Decode(&claims)
|
||||
}
|
||||
// Handle decode error
|
||||
if err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// Lookup signature method
|
||||
if method, ok := token.Header["alg"].(string); ok {
|
||||
if token.Method = GetSigningMethod(method); token.Method == nil {
|
||||
return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
|
||||
}
|
||||
} else {
|
||||
return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
|
||||
}
|
||||
|
||||
// Verify signing method is in the required set
|
||||
if p.ValidMethods != nil {
|
||||
var signingMethodValid = false
|
||||
var alg = token.Method.Alg()
|
||||
for _, m := range p.ValidMethods {
|
||||
if m == alg {
|
||||
signingMethodValid = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !signingMethodValid {
|
||||
// signing method is not in the listed set
|
||||
return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup key
|
||||
var key interface{}
|
||||
if keyFunc == nil {
|
||||
// keyFunc was not provided. short circuiting validation
|
||||
return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
|
||||
}
|
||||
if key, err = keyFunc(token); err != nil {
|
||||
// keyFunc returned an error
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
|
||||
}
|
||||
|
||||
vErr := &ValidationError{}
|
||||
|
||||
// Validate Claims
|
||||
if !p.SkipClaimsValidation {
|
||||
if err := token.Claims.Valid(); err != nil {
|
||||
|
||||
// If the Claims Valid returned an error, check if it is a validation error,
|
||||
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
|
||||
if e, ok := err.(*ValidationError); !ok {
|
||||
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
|
||||
} else {
|
||||
vErr = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Perform validation
|
||||
token.Signature = parts[2]
|
||||
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
|
||||
vErr.Inner = err
|
||||
vErr.Errors |= ValidationErrorSignatureInvalid
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
token.Valid = true
|
||||
return token, nil
|
||||
}
|
||||
|
||||
return token, vErr
|
||||
}
|
100 vendor/github.com/dgrijalva/jwt-go/rsa.go generated vendored

@@ -1,100 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
)
|
||||
|
||||
// Implements the RSA family of signing methods signing methods
|
||||
type SigningMethodRSA struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
// Specific instances for RS256 and company
|
||||
var (
|
||||
SigningMethodRS256 *SigningMethodRSA
|
||||
SigningMethodRS384 *SigningMethodRSA
|
||||
SigningMethodRS512 *SigningMethodRSA
|
||||
)
|
||||
|
||||
func init() {
|
||||
// RS256
|
||||
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
|
||||
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS256
|
||||
})
|
||||
|
||||
// RS384
|
||||
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
|
||||
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS384
|
||||
})
|
||||
|
||||
// RS512
|
||||
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
|
||||
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS512
|
||||
})
|
||||
}
|
||||
|
||||
func (m *SigningMethodRSA) Alg() string {
|
||||
return m.Name
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this signing method, must be an rsa.PublicKey structure.
|
||||
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rsaKey *rsa.PublicKey
|
||||
var ok bool
|
||||
|
||||
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
|
||||
return ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Verify the signature
|
||||
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, must be an rsa.PrivateKey structure.
|
||||
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
|
||||
var rsaKey *rsa.PrivateKey
|
||||
var ok bool
|
||||
|
||||
// Validate type of key
|
||||
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
|
||||
return "", ErrInvalidKey
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return the encoded bytes
|
||||
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
|
||||
return EncodeSegment(sigBytes), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
126 vendor/github.com/dgrijalva/jwt-go/rsa_pss.go generated vendored

@@ -1,126 +0,0 @@
// +build go1.4
|
||||
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
)
|
||||
|
||||
// Implements the RSAPSS family of signing methods signing methods
|
||||
type SigningMethodRSAPSS struct {
|
||||
*SigningMethodRSA
|
||||
Options *rsa.PSSOptions
|
||||
}
|
||||
|
||||
// Specific instances for RS/PS and company
|
||||
var (
|
||||
SigningMethodPS256 *SigningMethodRSAPSS
|
||||
SigningMethodPS384 *SigningMethodRSAPSS
|
||||
SigningMethodPS512 *SigningMethodRSAPSS
|
||||
)
|
||||
|
||||
func init() {
|
||||
// PS256
|
||||
SigningMethodPS256 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS256",
|
||||
Hash: crypto.SHA256,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA256,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS256
|
||||
})
|
||||
|
||||
// PS384
|
||||
SigningMethodPS384 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS384",
|
||||
Hash: crypto.SHA384,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA384,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS384
|
||||
})
|
||||
|
||||
// PS512
|
||||
SigningMethodPS512 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS512",
|
||||
Hash: crypto.SHA512,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA512,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS512
|
||||
})
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this verify method, key must be an rsa.PublicKey struct
|
||||
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rsaKey *rsa.PublicKey
|
||||
switch k := key.(type) {
|
||||
case *rsa.PublicKey:
|
||||
rsaKey = k
|
||||
default:
|
||||
return ErrInvalidKey
|
||||
}
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, key must be an rsa.PrivateKey struct
|
||||
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
|
||||
var rsaKey *rsa.PrivateKey
|
||||
|
||||
switch k := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
rsaKey = k
|
||||
default:
|
||||
return "", ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return the encoded bytes
|
||||
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
|
||||
return EncodeSegment(sigBytes), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
69 vendor/github.com/dgrijalva/jwt-go/rsa_utils.go generated vendored

@@ -1,69 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
|
||||
ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
|
||||
ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
|
||||
)
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 private key
|
||||
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
|
||||
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *rsa.PrivateKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
|
||||
return nil, ErrNotRSAPrivateKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 public key
|
||||
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
|
||||
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
|
||||
parsedKey = cert.PublicKey
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *rsa.PublicKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
|
||||
return nil, ErrNotRSAPublicKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
35 vendor/github.com/dgrijalva/jwt-go/signing_method.go generated vendored

@@ -1,35 +0,0 @@
package jwt

import (
	"sync"
)

var signingMethods = map[string]func() SigningMethod{}
var signingMethodLock = new(sync.RWMutex)

// Implement SigningMethod to add new methods for signing or verifying tokens.
type SigningMethod interface {
	Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
	Sign(signingString string, key interface{}) (string, error)    // Returns encoded signature or error
	Alg() string                                                    // returns the alg identifier for this method (example: 'HS256')
}

// Register the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
	signingMethodLock.Lock()
	defer signingMethodLock.Unlock()

	signingMethods[alg] = f
}

// Get a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
	signingMethodLock.RLock()
	defer signingMethodLock.RUnlock()

	if methodF, ok := signingMethods[alg]; ok {
		method = methodF()
	}
	return
}
108 vendor/github.com/dgrijalva/jwt-go/token.go generated vendored

@@ -1,108 +0,0 @@
package jwt
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
|
||||
// You can override it to use another time value. This is useful for testing or if your
|
||||
// server uses a different time zone than your tokens.
|
||||
var TimeFunc = time.Now
|
||||
|
||||
// Parse methods use this callback function to supply
|
||||
// the key for verification. The function receives the parsed,
|
||||
// but unverified Token. This allows you to use properties in the
|
||||
// Header of the token (such as `kid`) to identify which key to use.
|
||||
type Keyfunc func(*Token) (interface{}, error)
|
||||
|
||||
// A JWT Token. Different fields will be used depending on whether you're
|
||||
// creating or parsing/verifying a token.
|
||||
type Token struct {
|
||||
Raw string // The raw token. Populated when you Parse a token
|
||||
Method SigningMethod // The signing method used or to be used
|
||||
Header map[string]interface{} // The first segment of the token
|
||||
Claims Claims // The second segment of the token
|
||||
Signature string // The third segment of the token. Populated when you Parse a token
|
||||
Valid bool // Is the token valid? Populated when you Parse/Verify a token
|
||||
}
|
||||
|
||||
// Create a new Token. Takes a signing method
|
||||
func New(method SigningMethod) *Token {
|
||||
return NewWithClaims(method, MapClaims{})
|
||||
}
|
||||
|
||||
func NewWithClaims(method SigningMethod, claims Claims) *Token {
|
||||
return &Token{
|
||||
Header: map[string]interface{}{
|
||||
"typ": "JWT",
|
||||
"alg": method.Alg(),
|
||||
},
|
||||
Claims: claims,
|
||||
Method: method,
|
||||
}
|
||||
}
|
||||
|
||||
// Get the complete, signed token
|
||||
func (t *Token) SignedString(key interface{}) (string, error) {
|
||||
var sig, sstr string
|
||||
var err error
|
||||
if sstr, err = t.SigningString(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if sig, err = t.Method.Sign(sstr, key); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Join([]string{sstr, sig}, "."), nil
|
||||
}
|
||||
|
||||
// Generate the signing string. This is the
|
||||
// most expensive part of the whole deal. Unless you
|
||||
// need this for something special, just go straight for
|
||||
// the SignedString.
|
||||
func (t *Token) SigningString() (string, error) {
|
||||
var err error
|
||||
parts := make([]string, 2)
|
||||
for i, _ := range parts {
|
||||
var jsonValue []byte
|
||||
if i == 0 {
|
||||
if jsonValue, err = json.Marshal(t.Header); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
if jsonValue, err = json.Marshal(t.Claims); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
parts[i] = EncodeSegment(jsonValue)
|
||||
}
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// Parse, validate, and return a token.
|
||||
// keyFunc will receive the parsed token and should return the key for validating.
|
||||
// If everything is kosher, err will be nil
|
||||
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
|
||||
return new(Parser).Parse(tokenString, keyFunc)
|
||||
}
|
||||
|
||||
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
|
||||
return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
|
||||
}
|
||||
|
||||
// Encode JWT specific base64url encoding with padding stripped
|
||||
func EncodeSegment(seg []byte) string {
|
||||
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
|
||||
}
|
||||
|
||||
// Decode JWT specific base64url encoding with padding stripped
|
||||
func DecodeSegment(seg string) ([]byte, error) {
|
||||
if l := len(seg) % 4; l > 0 {
|
||||
seg += strings.Repeat("=", 4-l)
|
||||
}
|
||||
|
||||
return base64.URLEncoding.DecodeString(seg)
|
||||
}
|
182 vendor/github.com/docker/distribution/AUTHORS generated vendored

@@ -1,182 +0,0 @@
a-palchikov <deemok@gmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
Aaron Schlesinger <aschlesinger@deis.com>
|
||||
Aaron Vinson <avinson.public@gmail.com>
|
||||
Adam Duke <adam.v.duke@gmail.com>
|
||||
Adam Enger <adamenger@gmail.com>
|
||||
Adrian Mouat <adrian.mouat@gmail.com>
|
||||
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
|
||||
Alex Chan <alex.chan@metaswitch.com>
|
||||
Alex Elman <aelman@indeed.com>
|
||||
Alexey Gladkov <gladkov.alexey@gmail.com>
|
||||
allencloud <allen.sun@daocloud.io>
|
||||
amitshukla <ashukla73@hotmail.com>
|
||||
Amy Lindburg <amy.lindburg@docker.com>
|
||||
Andrew Hsu <andrewhsu@acm.org>
|
||||
Andrew Meredith <andymeredith@gmail.com>
|
||||
Andrew T Nguyen <andrew.nguyen@docker.com>
|
||||
Andrey Kostov <kostov.andrey@gmail.com>
|
||||
Andy Goldstein <agoldste@redhat.com>
|
||||
Anis Elleuch <vadmeste@gmail.com>
|
||||
Anton Tiurin <noxiouz@yandex.ru>
|
||||
Antonio Mercado <amercado@thinknode.com>
|
||||
Antonio Murdaca <runcom@redhat.com>
|
||||
Anusha Ragunathan <anusha@docker.com>
|
||||
Arien Holthuizen <aholthuizen@schubergphilis.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arthur Baars <arthur@semmle.com>
|
||||
Asuka Suzuki <hello@tanksuzuki.com>
|
||||
Avi Miller <avi.miller@oracle.com>
|
||||
Ayose Cazorla <ayosec@gmail.com>
|
||||
BadZen <dave.trombley@gmail.com>
|
||||
Ben Bodenmiller <bbodenmiller@hotmail.com>
|
||||
Ben Firshman <ben@firshman.co.uk>
|
||||
bin liu <liubin0329@gmail.com>
|
||||
Brian Bland <brian.bland@docker.com>
|
||||
burnettk <burnettk@gmail.com>
|
||||
Carson A <ca@carsonoid.net>
|
||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
Charles Smith <charles.smith@docker.com>
|
||||
Chris Dillon <squarism@gmail.com>
|
||||
cuiwei13 <cuiwei13@pku.edu.cn>
|
||||
cyli <cyli@twistedmatrix.com>
|
||||
Daisuke Fujita <dtanshi45@gmail.com>
|
||||
Daniel Huhn <daniel@danielhuhn.de>
|
||||
Darren Shepherd <darren@rancher.com>
|
||||
Dave Trombley <dave.trombley@gmail.com>
|
||||
Dave Tucker <dt@docker.com>
|
||||
David Lawrence <david.lawrence@docker.com>
|
||||
David Verhasselt <david@crowdway.com>
|
||||
David Xia <dxia@spotify.com>
|
||||
davidli <wenquan.li@hp.com>
|
||||
Dejan Golja <dejan@golja.org>
|
||||
Derek McGowan <derek@mcgstyle.net>
|
||||
Diogo Mónica <diogo.monica@gmail.com>
|
||||
DJ Enriquez <dj.enriquez@infospace.com>
|
||||
Donald Huang <don.hcd@gmail.com>
|
||||
Doug Davis <dug@us.ibm.com>
|
||||
Edgar Lee <edgar.lee@docker.com>
|
||||
Eric Yang <windfarer@gmail.com>
|
||||
Fabio Berchtold <jamesclonk@jamesclonk.ch>
|
||||
Fabio Huser <fabio@fh1.ch>
|
||||
farmerworking <farmerworking@gmail.com>
|
||||
Felix Yan <felixonmars@archlinux.org>
|
||||
Florentin Raud <florentin.raud@gmail.com>
|
||||
Frank Chen <frankchn@gmail.com>
|
||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
||||
gabriell nascimento <gabriell@bluesoft.com.br>
|
||||
Gleb Schukin <gschukin@ptsecurity.com>
|
||||
harche <p.harshal@gmail.com>
|
||||
Henri Gomez <henri.gomez@gmail.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hua Wang <wanghua.humble@gmail.com>
|
||||
HuKeping <hukeping@huawei.com>
|
||||
Ian Babrou <ibobrik@gmail.com>
|
||||
igayoso <igayoso@gmail.com>
|
||||
Jack Griffin <jackpg14@gmail.com>
|
||||
James Findley <jfindley@fastmail.com>
|
||||
Jason Freidman <jason.freidman@gmail.com>
|
||||
Jason Heiss <jheiss@aput.net>
|
||||
Jeff Nickoloff <jeff@allingeek.com>
|
||||
Jess Frazelle <acidburn@google.com>
|
||||
Jessie Frazelle <jessie@docker.com>
|
||||
jhaohai <jhaohai@foxmail.com>
|
||||
Jianqing Wang <tsing@jianqing.org>
|
||||
Jihoon Chung <jihoon@gmail.com>
|
||||
Joao Fernandes <joao.fernandes@docker.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
John Starks <jostarks@microsoft.com>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jon Poler <jonathan.poler@apcera.com>
|
||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||
Jordan Liggitt <jliggitt@redhat.com>
|
||||
Josh Chorlton <josh.chorlton@docker.com>
|
||||
Josh Hawn <josh.hawn@docker.com>
|
||||
Julien Fernandez <julien.fernandez@gmail.com>
|
||||
Ke Xu <leonhartx.k@gmail.com>
|
||||
Keerthan Mala <kmala@engineyard.com>
|
||||
Kelsey Hightower <kelsey.hightower@gmail.com>
|
||||
Kenneth Lim <kennethlimcp@gmail.com>
|
||||
Kenny Leung <kleung@google.com>
|
||||
Li Yi <denverdino@gmail.com>
|
||||
Liu Hua <sdu.liu@huawei.com>
|
||||
liuchang0812 <liuchang0812@gmail.com>
|
||||
Lloyd Ramey <lnr0626@gmail.com>
|
||||
Louis Kottmann <louis.kottmann@gmail.com>
|
||||
Luke Carpenter <x@rubynerd.net>
|
||||
Marcus Martins <marcus@docker.com>
|
||||
Mary Anthony <mary@docker.com>
|
||||
Matt Bentley <mbentley@mbentley.net>
|
||||
Matt Duch <matt@learnmetrics.com>
|
||||
Matt Moore <mattmoor@google.com>
|
||||
Matt Robenolt <matt@ydekproductions.com>
|
||||
Matthew Green <greenmr@live.co.uk>
|
||||
Michael Prokop <mika@grml.org>
|
||||
Michal Minar <miminar@redhat.com>
|
||||
Michal Minář <miminar@redhat.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Miquel Sabaté <msabate@suse.com>
|
||||
Misty Stanley-Jones <misty@apache.org>
|
||||
Misty Stanley-Jones <misty@docker.com>
|
||||
Morgan Bauer <mbauer@us.ibm.com>
|
||||
moxiegirl <mary@docker.com>
|
||||
Nathan Sullivan <nathan@nightsys.net>
|
||||
nevermosby <robolwq@qq.com>
|
||||
Nghia Tran <tcnghia@gmail.com>
|
||||
Nikita Tarasov <nikita@mygento.ru>
|
||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
|
||||
Oilbeater <liumengxinfly@gmail.com>
|
||||
Olivier Gambier <olivier@docker.com>
|
||||
Olivier Jacques <olivier.jacques@hp.com>
|
||||
Omer Cohen <git@omer.io>
|
||||
Patrick Devine <patrick.devine@docker.com>
|
||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||
Philip Misiowiec <philip@atlashealth.com>
|
||||
Pierre-Yves Ritschard <pyr@spootnik.org>
|
||||
Qiao Anran <qiaoanran@gmail.com>
|
||||
Randy Barlow <randy@electronsweatshop.com>
|
||||
Richard Scothern <richard.scothern@docker.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Rusty Conover <rusty@luckydinosaur.com>
|
||||
Sean Boran <Boran@users.noreply.github.com>
|
||||
Sebastiaan van Stijn <github@gone.nl>
|
||||
Sebastien Coavoux <s.coavoux@free.fr>
|
||||
Serge Dubrouski <sergeyfd@gmail.com>
|
||||
Sharif Nassar <sharif@mrwacky.com>
|
||||
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
|
||||
Shreyas Karnik <karnik.shreyas@gmail.com>
|
||||
Simon Thulbourn <simon+github@thulbourn.com>
|
||||
spacexnice <yaoyao.xyy@alibaba-inc.com>
|
||||
Spencer Rinehart <anubis@overthemonkey.com>
|
||||
Stan Hu <stanhu@gmail.com>
|
||||
Stefan Majewsky <stefan.majewsky@sap.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
Stephen J Day <stephen.day@docker.com>
|
||||
Sungho Moon <sungho.moon@navercorp.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
Sylvain Baubeau <sbaubeau@redhat.com>
|
||||
Ted Reed <ted.reed@gmail.com>
|
||||
tgic <farmer1992@gmail.com>
|
||||
Thomas Sjögren <konstruktoid@users.noreply.github.com>
|
||||
Tianon Gravi <admwiggin@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tonis Tiigi <tonistiigi@gmail.com>
|
||||
Tony Holdstock-Brown <tony@docker.com>
|
||||
Trevor Pounds <trevor.pounds@gmail.com>
|
||||
Troels Thomsen <troels@thomsen.io>
|
||||
Victor Vieux <vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Vincent Batts <vbatts@redhat.com>
|
||||
Vincent Demeester <vincent@sbr.pm>
|
||||
Vincent Giersch <vincent.giersch@ovh.net>
|
||||
W. Trevor King <wking@tremily.us>
|
||||
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
|
||||
xg.song <xg.song@venusource.com>
|
||||
xiekeyang <xiekeyang@huawei.com>
|
||||
Yann ROBERT <yann.robert@anantaplex.fr>
|
||||
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
|
||||
yuexiao-wang <wang.yuexiao@zte.com.cn>
|
||||
yuzou <zouyu7@huawei.com>
|
||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
202 vendor/github.com/docker/distribution/LICENSE generated vendored

@@ -1,202 +0,0 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/github.com/docker/distribution/digestset/set.go (generated, vendored): 247 lines removed
@@ -1,247 +0,0 @@
package digestset
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by a string
|
||||
// representation of the digest as well as short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are omitted from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
mutex sync.RWMutex
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (digest.Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg digest.Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := digest.Parse(d)
|
||||
if err == digest.ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes the given digest from the set. An err will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
// Not found if idx is after or value at idx is not digest
|
||||
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := dst.entries
|
||||
copy(entries[idx:], entries[idx+1:])
|
||||
entries = entries[:len(entries)-1]
|
||||
dst.entries = entries
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []digest.Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]digest.Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
|
||||
return retValues
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length represents the minimum value, the maximum length may be the
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[digest.Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg digest.Algorithm
|
||||
val string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
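For context on the digestset API removed above, here is a minimal usage sketch. It relies only on the exported names visible in the file (NewSet, Add, Lookup, ShortCodeTable) and the go-digest package from its imports; the sample digest inputs and the 12-character prefix length are illustrative, not taken from this repository.

package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Build a set from full digests, then resolve a short hex prefix.
	set := digestset.NewSet()
	d1 := digest.FromString("layer-a") // sha256 digest computed at runtime
	d2 := digest.FromString("layer-b")
	_ = set.Add(d1)
	_ = set.Add(d2)

	// Lookup accepts a unique hex prefix and returns the full digest,
	// or ErrDigestNotFound / ErrDigestAmbiguous on failure.
	full, err := set.Lookup(d1.Hex()[:12])
	fmt.Println(full, err)

	// ShortCodeTable computes the shortest unambiguous prefix per digest.
	for dg, short := range digestset.ShortCodeTable(set, 6) {
		fmt.Println(short, "->", dg)
	}
}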
vendor/github.com/docker/distribution/reference/helpers.go (generated, vendored): 42 lines removed
@@ -1,42 +0,0 @@
package reference

import "path"

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
	if _, ok := ref.(NamedTagged); ok {
		return false
	}
	if _, ok := ref.(Canonical); ok {
		return false
	}
	return true
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().Name()
	}
	return ref.Name()
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().String()
	}
	return ref.String()
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
		matched, _ = path.Match(pattern, FamiliarName(namedRef))
	}
	return matched, err
}
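A short sketch of how these helpers are typically combined with ParseNormalizedNamed from normalize.go (below); the "redis:latest" input and the values shown in the comments are illustrative.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// ParseNormalizedNamed expands "redis" to "docker.io/library/redis";
	// FamiliarString shortens it back to the form shown in Docker UIs.
	named, err := reference.ParseNormalizedNamed("redis:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/redis:latest
	fmt.Println(reference.FamiliarString(named)) // redis:latest

	// FamiliarMatch applies path.Match patterns against the familiar form.
	ok, _ := reference.FamiliarMatch("redis*", named)
	fmt.Println(ok) // true
}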
vendor/github.com/docker/distribution/reference/normalize.go (generated, vendored): 170 lines removed
@@ -1,170 +0,0 @@
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digestset"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
legacyDefaultDomain = "index.docker.io"
|
||||
defaultDomain = "docker.io"
|
||||
officialRepoName = "library"
|
||||
defaultTag = "latest"
|
||||
)
|
||||
|
||||
// normalizedNamed represents a name which has been
|
||||
// normalized and has a familiar form. A familiar name
|
||||
// is what is used in Docker UI. An example normalized
|
||||
// name is "docker.io/library/ubuntu" and corresponding
|
||||
// familiar name of "ubuntu".
|
||||
type normalizedNamed interface {
|
||||
Named
|
||||
Familiar() Named
|
||||
}
|
||||
|
||||
// ParseNormalizedNamed parses a string into a named reference
|
||||
// transforming a familiar name from Docker UI to a fully
|
||||
// qualified reference. If the value may be an identifier
|
||||
// use ParseAnyReference.
|
||||
func ParseNormalizedNamed(s string) (Named, error) {
|
||||
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
|
||||
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
|
||||
}
|
||||
domain, remainder := splitDockerDomain(s)
|
||||
var remoteName string
|
||||
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
|
||||
remoteName = remainder[:tagSep]
|
||||
} else {
|
||||
remoteName = remainder
|
||||
}
|
||||
if strings.ToLower(remoteName) != remoteName {
|
||||
return nil, errors.New("invalid reference format: repository name must be lowercase")
|
||||
}
|
||||
|
||||
ref, err := Parse(domain + "/" + remainder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
named, isNamed := ref.(Named)
|
||||
if !isNamed {
|
||||
return nil, fmt.Errorf("reference %s has no name", ref.String())
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// splitDockerDomain splits a repository name to domain and remotename string.
|
||||
// If no valid domain is found, the default domain is used. Repository name
|
||||
// needs to be already validated before.
|
||||
func splitDockerDomain(name string) (domain, remainder string) {
|
||||
i := strings.IndexRune(name, '/')
|
||||
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
|
||||
domain, remainder = defaultDomain, name
|
||||
} else {
|
||||
domain, remainder = name[:i], name[i+1:]
|
||||
}
|
||||
if domain == legacyDefaultDomain {
|
||||
domain = defaultDomain
|
||||
}
|
||||
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
|
||||
remainder = officialRepoName + "/" + remainder
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// familiarizeName returns a shortened version of the name familiar
|
||||
// to the Docker UI. Familiar names have the default domain
|
||||
// "docker.io" and "library/" repository prefix removed.
|
||||
// For example, "docker.io/library/redis" will have the familiar
|
||||
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
|
||||
// Returns a familiarized named only reference.
|
||||
func familiarizeName(named namedRepository) repository {
|
||||
repo := repository{
|
||||
domain: named.Domain(),
|
||||
path: named.Path(),
|
||||
}
|
||||
|
||||
if repo.domain == defaultDomain {
|
||||
repo.domain = ""
|
||||
// Handle official repositories which have the pattern "library/<official repo name>"
|
||||
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
|
||||
repo.path = split[1]
|
||||
}
|
||||
}
|
||||
return repo
|
||||
}
|
||||
|
||||
func (r reference) Familiar() Named {
|
||||
return reference{
|
||||
namedRepository: familiarizeName(r.namedRepository),
|
||||
tag: r.tag,
|
||||
digest: r.digest,
|
||||
}
|
||||
}
|
||||
|
||||
func (r repository) Familiar() Named {
|
||||
return familiarizeName(r)
|
||||
}
|
||||
|
||||
func (t taggedReference) Familiar() Named {
|
||||
return taggedReference{
|
||||
namedRepository: familiarizeName(t.namedRepository),
|
||||
tag: t.tag,
|
||||
}
|
||||
}
|
||||
|
||||
func (c canonicalReference) Familiar() Named {
|
||||
return canonicalReference{
|
||||
namedRepository: familiarizeName(c.namedRepository),
|
||||
digest: c.digest,
|
||||
}
|
||||
}
|
||||
|
||||
// TagNameOnly adds the default tag "latest" to a reference if it only has
|
||||
// a repo name.
|
||||
func TagNameOnly(ref Named) Named {
|
||||
if IsNameOnly(ref) {
|
||||
namedTagged, err := WithTag(ref, defaultTag)
|
||||
if err != nil {
|
||||
// Default tag must be valid, to create a NamedTagged
|
||||
// type with non-validated input the WithTag function
|
||||
// should be used instead
|
||||
panic(err)
|
||||
}
|
||||
return namedTagged
|
||||
}
|
||||
return ref
|
||||
}
|
||||
|
||||
// ParseAnyReference parses a reference string as a possible identifier,
|
||||
// full digest, or familiar name.
|
||||
func ParseAnyReference(ref string) (Reference, error) {
|
||||
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
|
||||
return digestReference("sha256:" + ref), nil
|
||||
}
|
||||
if dgst, err := digest.Parse(ref); err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
|
||||
return ParseNormalizedNamed(ref)
|
||||
}
|
||||
|
||||
// ParseAnyReferenceWithSet parses a reference string as a possible short
|
||||
// identifier to be matched in a digest set, a full digest, or familiar name.
|
||||
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
|
||||
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
|
||||
dgst, err := ds.Lookup(ref)
|
||||
if err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
} else {
|
||||
if dgst, err := digest.Parse(ref); err == nil {
|
||||
return digestReference(dgst), nil
|
||||
}
|
||||
}
|
||||
|
||||
return ParseNormalizedNamed(ref)
|
||||
}
|
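A minimal sketch of the other normalization helpers removed above (TagNameOnly and ParseAnyReference); the image names are illustrative.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// TagNameOnly adds the default "latest" tag to a bare repository name.
	named, _ := reference.ParseNormalizedNamed("docker.io/library/alpine")
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/alpine:latest

	// ParseAnyReference accepts a 64-hex identifier, a full digest, or a name.
	ref, err := reference.ParseAnyReference("ubuntu")
	fmt.Println(ref, err) // docker.io/library/ubuntu <nil>
}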
vendor/github.com/docker/distribution/reference/reference.go (generated, vendored): 433 lines removed
@@ -1,433 +0,0 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [domain '/'] path-component ['/' path-component]*
|
||||
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
//
|
||||
// identifier := /[a-f0-9]{64}/
|
||||
// short-identifier := /[a-f0-9]{6,64}/
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
// NameTotalLengthMax is the maximum total number of characters in a repository name.
|
||||
NameTotalLengthMax = 255
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
|
||||
ErrReferenceInvalidFormat = errors.New("invalid reference format")
|
||||
|
||||
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrTagInvalidFormat = errors.New("invalid tag format")
|
||||
|
||||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||
|
||||
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
|
||||
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
|
||||
|
||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||
|
||||
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
|
||||
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
|
||||
|
||||
// ErrNameNotCanonical is returned when a name is not canonical.
|
||||
ErrNameNotCanonical = errors.New("repository name must be canonical")
|
||||
)
|
||||
|
||||
// Reference is an opaque object reference identifier that may include
|
||||
// modifiers such as a hostname, name, tag, and digest.
|
||||
type Reference interface {
|
||||
// String returns the full reference
|
||||
String() string
|
||||
}
|
||||
|
||||
// Field provides a wrapper type for resolving correct reference types when
|
||||
// working with encoding.
|
||||
type Field struct {
|
||||
reference Reference
|
||||
}
|
||||
|
||||
// AsField wraps a reference in a Field for encoding.
|
||||
func AsField(reference Reference) Field {
|
||||
return Field{reference}
|
||||
}
|
||||
|
||||
// Reference unwraps the reference type from the field to
|
||||
// return the Reference object. This object should be
|
||||
// of the appropriate type to further check for different
|
||||
// reference types.
|
||||
func (f Field) Reference() Reference {
|
||||
return f.reference
|
||||
}
|
||||
|
||||
// MarshalText serializes the field to byte text which
|
||||
// is the string of the reference.
|
||||
func (f Field) MarshalText() (p []byte, err error) {
|
||||
return []byte(f.reference.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses text bytes by invoking the
|
||||
// reference parser to ensure the appropriately
|
||||
// typed reference object is wrapped by field.
|
||||
func (f *Field) UnmarshalText(p []byte) error {
|
||||
r, err := Parse(string(p))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.reference = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// Named is an object with a full name
|
||||
type Named interface {
|
||||
Reference
|
||||
Name() string
|
||||
}
|
||||
|
||||
// Tagged is an object which has a tag
|
||||
type Tagged interface {
|
||||
Reference
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// NamedTagged is an object including a name and tag.
|
||||
type NamedTagged interface {
|
||||
Named
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// Digested is an object which has a digest
|
||||
// in which it can be referenced by
|
||||
type Digested interface {
|
||||
Reference
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// Canonical reference is an object with a fully unique
|
||||
// name including a name with domain and digest
|
||||
type Canonical interface {
|
||||
Named
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// namedRepository is a reference to a repository with a name.
|
||||
// A namedRepository has both domain and path components.
|
||||
type namedRepository interface {
|
||||
Named
|
||||
Domain() string
|
||||
Path() string
|
||||
}
|
||||
|
||||
// Domain returns the domain part of the Named reference
|
||||
func Domain(named Named) string {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain()
|
||||
}
|
||||
domain, _ := splitDomain(named.Name())
|
||||
return domain
|
||||
}
|
||||
|
||||
// Path returns the name without the domain part of the Named reference
|
||||
func Path(named Named) (name string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Path()
|
||||
}
|
||||
_, path := splitDomain(named.Name())
|
||||
return path
|
||||
}
|
||||
|
||||
func splitDomain(name string) (string, string) {
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if len(match) != 3 {
|
||||
return "", name
|
||||
}
|
||||
return match[1], match[2]
|
||||
}
|
||||
|
||||
// SplitHostname splits a named reference into a
|
||||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
// DEPRECATED: Use Domain or Path
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain(), r.Path()
|
||||
}
|
||||
return splitDomain(named.Name())
|
||||
}
|
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: Parse will not handle short digests.
|
||||
func Parse(s string) (Reference, error) {
|
||||
matches := ReferenceRegexp.FindStringSubmatch(s)
|
||||
if matches == nil {
|
||||
if s == "" {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
|
||||
return nil, ErrNameContainsUppercase
|
||||
}
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
|
||||
if len(matches[1]) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
var repo repository
|
||||
|
||||
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
|
||||
if nameMatch != nil && len(nameMatch) == 3 {
|
||||
repo.domain = nameMatch[1]
|
||||
repo.path = nameMatch[2]
|
||||
} else {
|
||||
repo.domain = ""
|
||||
repo.path = matches[1]
|
||||
}
|
||||
|
||||
ref := reference{
|
||||
namedRepository: repo,
|
||||
tag: matches[2],
|
||||
}
|
||||
if matches[3] != "" {
|
||||
var err error
|
||||
ref.digest, err = digest.Parse(matches[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
r := getBestReferenceType(ref)
|
||||
if r == nil {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||
// the Named interface. The reference must have a name and be in the canonical
|
||||
// form, otherwise an error is returned.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: ParseNamed will not handle short digests.
|
||||
func ParseNamed(s string) (Named, error) {
|
||||
named, err := ParseNormalizedNamed(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if named.String() != s {
|
||||
return nil, ErrNameNotCanonical
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// WithName returns a named object representing the given string. If the input
|
||||
// is invalid ErrReferenceInvalidFormat will be returned.
|
||||
func WithName(name string) (Named, error) {
|
||||
if len(name) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if match == nil || len(match) != 3 {
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
return repository{
|
||||
domain: match[1],
|
||||
path: match[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||
// reference incorporating both the name and the tag.
|
||||
func WithTag(name Named, tag string) (NamedTagged, error) {
|
||||
if !anchoredTagRegexp.MatchString(tag) {
|
||||
return nil, ErrTagInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if canonical, ok := name.(Canonical); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
digest: canonical.Digest(),
|
||||
}, nil
|
||||
}
|
||||
return taggedReference{
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithDigest combines the name from "name" and the digest from "digest" to form
|
||||
// a reference incorporating both the name and the digest.
|
||||
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
||||
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||
return nil, ErrDigestInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if tagged, ok := name.(Tagged); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tagged.Tag(),
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
return canonicalReference{
|
||||
namedRepository: repo,
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TrimNamed removes any tag or digest from the named reference.
|
||||
func TrimNamed(ref Named) Named {
|
||||
domain, path := SplitHostname(ref)
|
||||
return repository{
|
||||
domain: domain,
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
func getBestReferenceType(ref reference) Reference {
|
||||
if ref.Name() == "" {
|
||||
// Allow digest only references
|
||||
if ref.digest != "" {
|
||||
return digestReference(ref.digest)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if ref.tag == "" {
|
||||
if ref.digest != "" {
|
||||
return canonicalReference{
|
||||
namedRepository: ref.namedRepository,
|
||||
digest: ref.digest,
|
||||
}
|
||||
}
|
||||
return ref.namedRepository
|
||||
}
|
||||
if ref.digest == "" {
|
||||
return taggedReference{
|
||||
namedRepository: ref.namedRepository,
|
||||
tag: ref.tag,
|
||||
}
|
||||
}
|
||||
|
||||
return ref
|
||||
}
|
||||
|
||||
type reference struct {
|
||||
namedRepository
|
||||
tag string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (r reference) String() string {
|
||||
return r.Name() + ":" + r.tag + "@" + r.digest.String()
|
||||
}
|
||||
|
||||
func (r reference) Tag() string {
|
||||
return r.tag
|
||||
}
|
||||
|
||||
func (r reference) Digest() digest.Digest {
|
||||
return r.digest
|
||||
}
|
||||
|
||||
type repository struct {
|
||||
domain string
|
||||
path string
|
||||
}
|
||||
|
||||
func (r repository) String() string {
|
||||
return r.Name()
|
||||
}
|
||||
|
||||
func (r repository) Name() string {
|
||||
if r.domain == "" {
|
||||
return r.path
|
||||
}
|
||||
return r.domain + "/" + r.path
|
||||
}
|
||||
|
||||
func (r repository) Domain() string {
|
||||
return r.domain
|
||||
}
|
||||
|
||||
func (r repository) Path() string {
|
||||
return r.path
|
||||
}
|
||||
|
||||
type digestReference digest.Digest
|
||||
|
||||
func (d digestReference) String() string {
|
||||
return digest.Digest(d).String()
|
||||
}
|
||||
|
||||
func (d digestReference) Digest() digest.Digest {
|
||||
return digest.Digest(d)
|
||||
}
|
||||
|
||||
type taggedReference struct {
|
||||
namedRepository
|
||||
tag string
|
||||
}
|
||||
|
||||
func (t taggedReference) String() string {
|
||||
return t.Name() + ":" + t.tag
|
||||
}
|
||||
|
||||
func (t taggedReference) Tag() string {
|
||||
return t.tag
|
||||
}
|
||||
|
||||
type canonicalReference struct {
|
||||
namedRepository
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (c canonicalReference) String() string {
|
||||
return c.Name() + "@" + c.digest.String()
|
||||
}
|
||||
|
||||
func (c canonicalReference) Digest() digest.Digest {
|
||||
return c.digest
|
||||
}
|
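A minimal sketch of the core reference API removed above (Parse, Domain, Path, TrimNamed, WithDigest); the registry host, repository path, and digest input are illustrative.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Parse is purely syntactic and does not normalize; Domain and Path
	// split the name according to the grammar in the package comment.
	ref, err := reference.Parse("registry.example.com:5000/team/app:v1")
	if err != nil {
		panic(err)
	}
	named := ref.(reference.Named)
	fmt.Println(reference.Domain(named), reference.Path(named)) // registry.example.com:5000 team/app

	// WithDigest upgrades a trimmed name to a canonical (digested) reference.
	dgst := digest.FromString("example")
	canonical, _ := reference.WithDigest(reference.TrimNamed(named), dgst)
	fmt.Println(canonical.String()) // registry.example.com:5000/team/app@sha256:...
}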
vendor/github.com/docker/distribution/reference/regexp.go (generated, vendored): 143 lines removed
@@ -1,143 +0,0 @@
package reference
|
||||
|
||||
import "regexp"
|
||||
|
||||
var (
|
||||
// alphaNumericRegexp defines the alpha numeric atom, typically a
|
||||
// component of names. This only allows lower case characters and digits.
|
||||
alphaNumericRegexp = match(`[a-z0-9]+`)
|
||||
|
||||
// separatorRegexp defines the separators allowed to be embedded in name
|
||||
// components. This allows one period, one or two underscores, and multiple
|
||||
// dashes.
|
||||
separatorRegexp = match(`(?:[._]|__|[-]*)`)
|
||||
|
||||
// nameComponentRegexp restricts registry path component names to start
|
||||
// with at least one letter or number, with following parts able to be
|
||||
// separated by one period, one or two underscore and multiple dashes.
|
||||
nameComponentRegexp = expression(
|
||||
alphaNumericRegexp,
|
||||
optional(repeated(separatorRegexp, alphaNumericRegexp)))
|
||||
|
||||
// domainComponentRegexp restricts the registry domain component of a
|
||||
// repository name to start with a component as defined by DomainRegexp
|
||||
// and followed by an optional port.
|
||||
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
|
||||
|
||||
// DomainRegexp defines the structure of potential domain components
|
||||
// that may be part of image names. This is purposely a subset of what is
|
||||
// allowed by DNS to ensure backwards compatibility with Docker image
|
||||
// names.
|
||||
DomainRegexp = expression(
|
||||
domainComponentRegexp,
|
||||
optional(repeated(literal(`.`), domainComponentRegexp)),
|
||||
optional(literal(`:`), match(`[0-9]+`)))
|
||||
|
||||
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
|
||||
TagRegexp = match(`[\w][\w.-]{0,127}`)
|
||||
|
||||
// anchoredTagRegexp matches valid tag names, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredTagRegexp = anchored(TagRegexp)
|
||||
|
||||
// DigestRegexp matches valid digests.
|
||||
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
|
||||
|
||||
// anchoredDigestRegexp matches valid digests, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredDigestRegexp = anchored(DigestRegexp)
|
||||
|
||||
// NameRegexp is the format for the name component of references. The
|
||||
// regexp has capturing groups for the domain and name part omitting
|
||||
// the separating forward slash from either.
|
||||
NameRegexp = expression(
|
||||
optional(DomainRegexp, literal(`/`)),
|
||||
nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp)))
|
||||
|
||||
// anchoredNameRegexp is used to parse a name value, capturing the
|
||||
// domain and trailing components.
|
||||
anchoredNameRegexp = anchored(
|
||||
optional(capture(DomainRegexp), literal(`/`)),
|
||||
capture(nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp))))
|
||||
|
||||
// ReferenceRegexp is the full supported format of a reference. The regexp
|
||||
// is anchored and has capturing groups for name, tag, and digest
|
||||
// components.
|
||||
ReferenceRegexp = anchored(capture(NameRegexp),
|
||||
optional(literal(":"), capture(TagRegexp)),
|
||||
optional(literal("@"), capture(DigestRegexp)))
|
||||
|
||||
// IdentifierRegexp is the format for string identifier used as a
|
||||
// content addressable identifier using sha256. These identifiers
|
||||
// are like digests without the algorithm, since sha256 is used.
|
||||
IdentifierRegexp = match(`([a-f0-9]{64})`)
|
||||
|
||||
// ShortIdentifierRegexp is the format used to represent a prefix
|
||||
// of an identifier. A prefix may be used to match a sha256 identifier
|
||||
// within a list of trusted identifiers.
|
||||
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
|
||||
|
||||
// anchoredIdentifierRegexp is used to check or match an
|
||||
// identifier value, anchored at start and end of string.
|
||||
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
|
||||
|
||||
// anchoredShortIdentifierRegexp is used to check if a value
|
||||
// is a possible identifier prefix, anchored at start and end
|
||||
// of string.
|
||||
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
|
||||
)
|
||||
|
||||
// match compiles the string to a regular expression.
|
||||
var match = regexp.MustCompile
|
||||
|
||||
// literal compiles s into a literal regular expression, escaping any regexp
|
||||
// reserved characters.
|
||||
func literal(s string) *regexp.Regexp {
|
||||
re := match(regexp.QuoteMeta(s))
|
||||
|
||||
if _, complete := re.LiteralPrefix(); !complete {
|
||||
panic("must be a literal")
|
||||
}
|
||||
|
||||
return re
|
||||
}
|
||||
|
||||
// expression defines a full expression, where each regular expression must
|
||||
// follow the previous.
|
||||
func expression(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
var s string
|
||||
for _, re := range res {
|
||||
s += re.String()
|
||||
}
|
||||
|
||||
return match(s)
|
||||
}
|
||||
|
||||
// optional wraps the expression in a non-capturing group and makes the
|
||||
// production optional.
|
||||
func optional(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `?`)
|
||||
}
|
||||
|
||||
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||
// matches.
|
||||
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `+`)
|
||||
}
|
||||
|
||||
// group wraps the regexp in a non-capturing group.
|
||||
func group(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(?:` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// capture wraps the expression in a capturing group.
|
||||
func capture(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// anchored anchors the regular expression by adding start and end delimiters.
|
||||
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`^` + expression(res...).String() + `$`)
|
||||
}
|
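The combinator helpers above (literal, optional, repeated, capture, anchored) are unexported, but the composed expressions are part of the package API; a small sketch of exercising them directly, with illustrative reference strings.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// ReferenceRegexp is anchored and has capturing groups for name, tag, digest.
	m := reference.ReferenceRegexp.FindStringSubmatch("quay.io/org/app:v2")
	fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3]) // name="quay.io/org/app" tag="v2" digest=""

	// Path components are restricted to lowercase, so this does not match.
	fmt.Println(reference.ReferenceRegexp.MatchString("docker.io/Library/Nginx")) // false
}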
vendor/github.com/emicklei/go-restful-swagger12/model_builder_test.go (generated, vendored, new file): 1283 lines added
File diff suppressed because it is too large.
vendor/github.com/emicklei/go-restful-swagger12/model_list_test.go (generated, vendored, new file): 48 lines added
@@ -0,0 +1,48 @@
package swagger
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestModelList(t *testing.T) {
|
||||
m := Model{}
|
||||
m.Id = "m"
|
||||
l := ModelList{}
|
||||
l.Put("m", m)
|
||||
k, ok := l.At("m")
|
||||
if !ok {
|
||||
t.Error("want model back")
|
||||
}
|
||||
if got, want := k.Id, "m"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelList_Marshal(t *testing.T) {
|
||||
l := ModelList{}
|
||||
m := Model{Id: "myid"}
|
||||
l.Put("myid", m)
|
||||
data, err := json.Marshal(l)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if got, want := string(data), `{"myid":{"id":"myid","properties":{}}}`; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelList_Unmarshal(t *testing.T) {
|
||||
data := `{"myid":{"id":"myid","properties":{}}}`
|
||||
l := ModelList{}
|
||||
if err := json.Unmarshal([]byte(data), &l); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
m, ok := l.At("myid")
|
||||
if !ok {
|
||||
t.Error("expected myid")
|
||||
}
|
||||
if got, want := m.Id, "myid"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
vendor/github.com/emicklei/go-restful-swagger12/model_property_ext_test.go (generated, vendored, new file): 70 lines added
@@ -0,0 +1,70 @@
package swagger
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// clear && go test -v -test.run TestThatExtraTagsAreReadIntoModel ...swagger
|
||||
func TestThatExtraTagsAreReadIntoModel(t *testing.T) {
|
||||
type fakeint int
|
||||
type fakearray string
|
||||
type Anything struct {
|
||||
Name string `description:"name" modelDescription:"a test"`
|
||||
Size int `minimum:"0" maximum:"10"`
|
||||
Stati string `enum:"off|on" default:"on" modelDescription:"more description"`
|
||||
ID string `unique:"true"`
|
||||
FakeInt fakeint `type:"integer"`
|
||||
FakeArray fakearray `type:"[]string"`
|
||||
IP net.IP `type:"string"`
|
||||
Password string
|
||||
}
|
||||
m := modelsFromStruct(Anything{})
|
||||
props, _ := m.At("swagger.Anything")
|
||||
p1, _ := props.Properties.At("Name")
|
||||
if got, want := p1.Description, "name"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p2, _ := props.Properties.At("Size")
|
||||
if got, want := p2.Minimum, "0"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
if got, want := p2.Maximum, "10"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p3, _ := props.Properties.At("Stati")
|
||||
if got, want := p3.Enum[0], "off"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
if got, want := p3.Enum[1], "on"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p4, _ := props.Properties.At("ID")
|
||||
if got, want := *p4.UniqueItems, true; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p5, _ := props.Properties.At("Password")
|
||||
if got, want := *p5.Type, "string"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p6, _ := props.Properties.At("FakeInt")
|
||||
if got, want := *p6.Type, "integer"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p7, _ := props.Properties.At("FakeArray")
|
||||
if got, want := *p7.Type, "array"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p7p, _ := props.Properties.At("FakeArray")
|
||||
if got, want := *p7p.Items.Type, "string"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
p8, _ := props.Properties.At("IP")
|
||||
if got, want := *p8.Type, "string"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
|
||||
if got, want := props.Description, "a test\nmore description"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
vendor/github.com/emicklei/go-restful-swagger12/model_property_list_test.go (generated, vendored, new file): 47 lines added
@@ -0,0 +1,47 @@
package swagger
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestModelPropertyList(t *testing.T) {
|
||||
l := ModelPropertyList{}
|
||||
p := ModelProperty{Description: "d"}
|
||||
l.Put("p", p)
|
||||
q, ok := l.At("p")
|
||||
if !ok {
|
||||
t.Error("expected p")
|
||||
}
|
||||
if got, want := q.Description, "d"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelPropertyList_Marshal(t *testing.T) {
|
||||
l := ModelPropertyList{}
|
||||
p := ModelProperty{Description: "d"}
|
||||
l.Put("p", p)
|
||||
data, err := json.Marshal(l)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if got, want := string(data), `{"p":{"description":"d"}}`; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelPropertyList_Unmarshal(t *testing.T) {
|
||||
data := `{"p":{"description":"d"}}`
|
||||
l := ModelPropertyList{}
|
||||
if err := json.Unmarshal([]byte(data), &l); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
m, ok := l.At("p")
|
||||
if !ok {
|
||||
t.Error("expected p")
|
||||
}
|
||||
if got, want := m.Description, "d"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map_test.go (generated, vendored, new file): 29 lines added
@@ -0,0 +1,29 @@
package swagger

import (
	"testing"

	"github.com/emicklei/go-restful"
)

// go test -v -test.run TestOrderedRouteMap ...swagger
func TestOrderedRouteMap(t *testing.T) {
	m := newOrderedRouteMap()
	r1 := restful.Route{Path: "/r1"}
	r2 := restful.Route{Path: "/r2"}
	m.Add("a", r1)
	m.Add("b", r2)
	m.Add("b", r1)
	m.Add("d", r2)
	m.Add("c", r2)
	order := ""
	m.Do(func(k string, routes []restful.Route) {
		order += k
		if len(routes) == 0 {
			t.Fail()
		}
	})
	if order != "abdc" {
		t.Fail()
	}
}
vendor/github.com/emicklei/go-restful-swagger12/postbuild_model_test.go (generated, vendored, new file): 42 lines added
@@ -0,0 +1,42 @@
package swagger

import "testing"

type Boat struct {
	Length int `json:"-"` // by default, this makes the fields not required
	Weight int `json:"-"`
}

// PostBuildModel is from swagger.ModelBuildable
func (b Boat) PostBuildModel(m *Model) *Model {
	// override required
	m.Required = []string{"Length", "Weight"}

	// add model property (just to test it can be added; is this a real use case?)
	extraType := "string"
	m.Properties.Put("extra", ModelProperty{
		Description: "extra description",
		DataTypeFields: DataTypeFields{
			Type: &extraType,
		},
	})
	return m
}

func TestCustomPostModelBuilde(t *testing.T) {
	testJsonFromStruct(t, Boat{}, `{
  "swagger.Boat": {
   "id": "swagger.Boat",
   "required": [
    "Length",
    "Weight"
   ],
   "properties": {
    "extra": {
     "type": "string",
     "description": "extra description"
    }
   }
  }
 }`)
}
vendor/github.com/emicklei/go-restful-swagger12/swagger_test.go (generated, vendored, new file): 318 lines added
@@ -0,0 +1,318 @@
package swagger
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/emicklei/go-restful"
|
||||
"github.com/emicklei/go-restful-swagger12/test_package"
|
||||
)
|
||||
|
||||
func TestInfoStruct_Issue231(t *testing.T) {
|
||||
config := Config{
|
||||
Info: Info{
|
||||
Title: "Title",
|
||||
Description: "Description",
|
||||
TermsOfServiceUrl: "http://example.com",
|
||||
Contact: "example@example.com",
|
||||
License: "License",
|
||||
LicenseUrl: "http://example.com/license.txt",
|
||||
},
|
||||
}
|
||||
sws := newSwaggerService(config)
|
||||
str, err := json.MarshalIndent(sws.produceListing(), "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
compareJson(t, string(str), `
|
||||
{
|
||||
"apiVersion": "",
|
||||
"swaggerVersion": "1.2",
|
||||
"apis": null,
|
||||
"info": {
|
||||
"title": "Title",
|
||||
"description": "Description",
|
||||
"termsOfServiceUrl": "http://example.com",
|
||||
"contact": "example@example.com",
|
||||
"license": "License",
|
||||
"licenseUrl": "http://example.com/license.txt"
|
||||
}
|
||||
}
|
||||
`)
|
||||
}
|
||||
|
||||
// go test -v -test.run TestThatMultiplePathsOnRootAreHandled ...swagger
|
||||
func TestThatMultiplePathsOnRootAreHandled(t *testing.T) {
|
||||
ws1 := new(restful.WebService)
|
||||
ws1.Route(ws1.GET("/_ping").To(dummy))
|
||||
ws1.Route(ws1.GET("/version").To(dummy))
|
||||
|
||||
cfg := Config{
|
||||
WebServicesUrl: "http://here.com",
|
||||
ApiPath: "/apipath",
|
||||
WebServices: []*restful.WebService{ws1},
|
||||
}
|
||||
sws := newSwaggerService(cfg)
|
||||
decl := sws.composeDeclaration(ws1, "/")
|
||||
if got, want := len(decl.Apis), 2; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteSamples(t *testing.T) {
|
||||
ws1 := new(restful.WebService)
|
||||
ws1.Route(ws1.GET("/object").To(dummy).Writes(test_package.TestStruct{}))
|
||||
ws1.Route(ws1.GET("/array").To(dummy).Writes([]test_package.TestStruct{}))
|
||||
ws1.Route(ws1.GET("/object_and_array").To(dummy).Writes(struct{ Abc test_package.TestStruct }{}))
|
||||
|
||||
cfg := Config{
|
||||
WebServicesUrl: "http://here.com",
|
||||
ApiPath: "/apipath",
|
||||
WebServices: []*restful.WebService{ws1},
|
||||
}
|
||||
sws := newSwaggerService(cfg)
|
||||
|
||||
decl := sws.composeDeclaration(ws1, "/")
|
||||
|
||||
str, err := json.MarshalIndent(decl.Apis, "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
compareJson(t, string(str), `
|
||||
[
|
||||
{
|
||||
"path": "/object",
|
||||
"description": "",
|
||||
"operations": [
|
||||
{
|
||||
"type": "test_package.TestStruct",
|
||||
"method": "GET",
|
||||
"nickname": "dummy",
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/array",
|
||||
"description": "",
|
||||
"operations": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "test_package.TestStruct"
|
||||
},
|
||||
"method": "GET",
|
||||
"nickname": "dummy",
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/object_and_array",
|
||||
"description": "",
|
||||
"operations": [
|
||||
{
|
||||
"type": "struct { Abc test_package.TestStruct }",
|
||||
"method": "GET",
|
||||
"nickname": "dummy",
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]`)
|
||||
|
||||
str, err = json.MarshalIndent(decl.Models, "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
compareJson(t, string(str), `
|
||||
{
|
||||
"test_package.TestStruct": {
|
||||
"id": "test_package.TestStruct",
|
||||
"required": [
|
||||
"TestField"
|
||||
],
|
||||
"properties": {
|
||||
"TestField": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"||test_package.TestStruct": {
|
||||
"id": "||test_package.TestStruct",
|
||||
"properties": {}
|
||||
},
|
||||
"struct { Abc test_package.TestStruct }": {
|
||||
"id": "struct { Abc test_package.TestStruct }",
|
||||
"required": [
|
||||
"Abc"
|
||||
],
|
||||
"properties": {
|
||||
"Abc": {
|
||||
"$ref": "test_package.TestStruct"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`)
|
||||
}
|
||||
|
||||
func TestRoutesWithCommonPart(t *testing.T) {
|
||||
ws1 := new(restful.WebService)
|
||||
ws1.Path("/")
|
||||
ws1.Route(ws1.GET("/foobar").To(dummy).Writes(test_package.TestStruct{}))
|
||||
ws1.Route(ws1.HEAD("/foobar").To(dummy).Writes(test_package.TestStruct{}))
|
||||
ws1.Route(ws1.GET("/foo").To(dummy).Writes([]test_package.TestStruct{}))
|
||||
ws1.Route(ws1.HEAD("/foo").To(dummy).Writes(test_package.TestStruct{}))
|
||||
|
||||
cfg := Config{
|
||||
WebServicesUrl: "http://here.com",
|
||||
ApiPath: "/apipath",
|
||||
WebServices: []*restful.WebService{ws1},
|
||||
}
|
||||
sws := newSwaggerService(cfg)
|
||||
|
||||
decl := sws.composeDeclaration(ws1, "/foo")
|
||||
|
||||
str, err := json.MarshalIndent(decl.Apis, "", " ")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
compareJson(t, string(str), `[
|
||||
{
|
||||
"path": "/foo",
|
||||
"description": "",
|
||||
"operations": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "test_package.TestStruct"
|
||||
},
|
||||
"method": "GET",
|
||||
"nickname": "dummy",
|
||||
"parameters": []
|
||||
},
|
||||
{
|
||||
"type": "test_package.TestStruct",
|
||||
"method": "HEAD",
|
||||
"nickname": "dummy",
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]`)
|
||||
}
|
||||
|
||||
// go test -v -test.run TestServiceToApi ...swagger
|
||||
func TestServiceToApi(t *testing.T) {
|
||||
ws := new(restful.WebService)
|
||||
ws.Path("/tests")
|
||||
ws.Consumes(restful.MIME_JSON)
|
||||
ws.Produces(restful.MIME_XML)
|
||||
ws.Route(ws.GET("/a").To(dummy).Writes(sample{}))
|
||||
ws.Route(ws.PUT("/b").To(dummy).Writes(sample{}))
|
||||
ws.Route(ws.POST("/c").To(dummy).Writes(sample{}))
|
||||
ws.Route(ws.DELETE("/d").To(dummy).Writes(sample{}))
|
||||
|
||||
ws.Route(ws.GET("/d").To(dummy).Writes(sample{}))
|
||||
ws.Route(ws.PUT("/c").To(dummy).Writes(sample{}))
|
||||
ws.Route(ws.POST("/b").To(dummy).Writes(sample{}))
|
||||
ws.Route(ws.DELETE("/a").To(dummy).Writes(sample{}))
|
||||
ws.ApiVersion("1.2.3")
|
||||
cfg := Config{
|
||||
WebServicesUrl: "http://here.com",
|
||||
ApiPath: "/apipath",
|
||||
WebServices: []*restful.WebService{ws},
|
||||
PostBuildHandler: func(in *ApiDeclarationList) {},
|
||||
}
|
||||
sws := newSwaggerService(cfg)
|
||||
decl := sws.composeDeclaration(ws, "/tests")
|
||||
// checks
|
||||
if decl.ApiVersion != "1.2.3" {
|
||||
t.Errorf("got %v want %v", decl.ApiVersion, "1.2.3")
|
||||
}
|
||||
if decl.BasePath != "http://here.com" {
|
||||
t.Errorf("got %v want %v", decl.BasePath, "http://here.com")
|
||||
}
|
||||
if len(decl.Apis) != 4 {
|
||||
t.Errorf("got %v want %v", len(decl.Apis), 4)
|
||||
}
|
||||
pathOrder := ""
|
||||
for _, each := range decl.Apis {
|
||||
pathOrder += each.Path
|
||||
for _, other := range each.Operations {
|
||||
pathOrder += other.Method
|
||||
}
|
||||
}
|
||||
|
||||
if pathOrder != "/tests/aGETDELETE/tests/bPUTPOST/tests/cPOSTPUT/tests/dDELETEGET" {
|
||||
t.Errorf("got %v want %v", pathOrder, "see test source")
|
||||
}
|
||||
}
|
||||
|
||||
func dummy(i *restful.Request, o *restful.Response) {}
|
||||
|
||||
// go test -v -test.run TestIssue78 ...swagger
|
||||
type Response struct {
|
||||
Code int
|
||||
Users *[]User
|
||||
Items *[]TestItem
|
||||
}
|
||||
type User struct {
|
||||
Id, Name string
|
||||
}
|
||||
type TestItem struct {
|
||||
Id, Name string
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run TestComposeResponseMessages ...swagger
|
||||
func TestComposeResponseMessages(t *testing.T) {
|
||||
responseErrors := map[int]restful.ResponseError{}
|
||||
responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: TestItem{}}
|
||||
route := restful.Route{ResponseErrors: responseErrors}
|
||||
decl := new(ApiDeclaration)
|
||||
decl.Models = ModelList{}
|
||||
msgs := composeResponseMessages(route, decl, &Config{})
|
||||
if msgs[0].ResponseModel != "swagger.TestItem" {
|
||||
t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue78(t *testing.T) {
|
||||
sws := newSwaggerService(Config{})
|
||||
models := new(ModelList)
|
||||
sws.addModelFromSampleTo(&Operation{}, true, Response{Items: &[]TestItem{}}, models)
|
||||
model, ok := models.At("swagger.Response")
|
||||
if !ok {
|
||||
t.Fatal("missing response model")
|
||||
}
|
||||
if "swagger.Response" != model.Id {
|
||||
t.Fatal("wrong model id:" + model.Id)
|
||||
}
|
||||
code, ok := model.Properties.At("Code")
|
||||
if !ok {
|
||||
t.Fatal("missing code")
|
||||
}
|
||||
if "integer" != *code.Type {
|
||||
t.Fatal("wrong code type:" + *code.Type)
|
||||
}
|
||||
items, ok := model.Properties.At("Items")
|
||||
if !ok {
|
||||
t.Fatal("missing items")
|
||||
}
|
||||
if "array" != *items.Type {
|
||||
t.Fatal("wrong items type:" + *items.Type)
|
||||
}
|
||||
items_items := items.Items
|
||||
if items_items == nil {
|
||||
t.Fatal("missing items->items")
|
||||
}
|
||||
ref := items_items.Ref
|
||||
if ref == nil {
|
||||
t.Fatal("missing $ref")
|
||||
}
|
||||
if *ref != "swagger.TestItem" {
|
||||
t.Fatal("wrong $ref:" + *ref)
|
||||
}
|
||||
}
|
5
vendor/github.com/emicklei/go-restful-swagger12/test_package/struct.go
generated
vendored
Normal file
|
@@ -0,0 +1,5 @@
|
|||
package test_package
|
||||
|
||||
type TestStruct struct {
|
||||
TestField string
|
||||
}
|
86
vendor/github.com/emicklei/go-restful-swagger12/utils_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,86 @@
|
|||
package swagger
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testJsonFromStructWithConfig(t *testing.T, sample interface{}, expectedJson string, config *Config) bool {
|
||||
m := modelsFromStructWithConfig(sample, config)
|
||||
data, _ := json.MarshalIndent(m, " ", " ")
|
||||
return compareJson(t, string(data), expectedJson)
|
||||
}
|
||||
|
||||
func modelsFromStructWithConfig(sample interface{}, config *Config) *ModelList {
|
||||
models := new(ModelList)
|
||||
builder := modelBuilder{Models: models, Config: config}
|
||||
builder.addModelFrom(sample)
|
||||
return models
|
||||
}
|
||||
|
||||
func testJsonFromStruct(t *testing.T, sample interface{}, expectedJson string) bool {
|
||||
return testJsonFromStructWithConfig(t, sample, expectedJson, &Config{})
|
||||
}
|
||||
|
||||
func modelsFromStruct(sample interface{}) *ModelList {
|
||||
return modelsFromStructWithConfig(sample, &Config{})
|
||||
}
|
||||
|
||||
func compareJson(t *testing.T, actualJsonAsString string, expectedJsonAsString string) bool {
|
||||
success := false
|
||||
var actualMap map[string]interface{}
|
||||
json.Unmarshal([]byte(actualJsonAsString), &actualMap)
|
||||
var expectedMap map[string]interface{}
|
||||
err := json.Unmarshal([]byte(expectedJsonAsString), &expectedMap)
|
||||
if err != nil {
|
||||
var actualArray []interface{}
|
||||
json.Unmarshal([]byte(actualJsonAsString), &actualArray)
|
||||
var expectedArray []interface{}
|
||||
err := json.Unmarshal([]byte(expectedJsonAsString), &expectedArray)
|
||||
success = reflect.DeepEqual(actualArray, expectedArray)
|
||||
if err != nil {
|
||||
t.Fatalf("Unparsable expected JSON: %s, actual: %v, expected: %v", err, actualJsonAsString, expectedJsonAsString)
|
||||
}
|
||||
} else {
|
||||
success = reflect.DeepEqual(actualMap, expectedMap)
|
||||
}
|
||||
if !success {
|
||||
t.Log("---- expected -----")
|
||||
t.Log(withLineNumbers(expectedJsonAsString))
|
||||
t.Log("---- actual -----")
|
||||
t.Log(withLineNumbers(actualJsonAsString))
|
||||
t.Log("---- raw -----")
|
||||
t.Log(actualJsonAsString)
|
||||
t.Error("there are differences")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func indexOfNonMatchingLine(actual, expected string) int {
|
||||
a := strings.Split(actual, "\n")
|
||||
e := strings.Split(expected, "\n")
|
||||
size := len(a)
|
||||
if len(e) < len(a) {
|
||||
size = len(e)
|
||||
}
|
||||
for i := 0; i < size; i++ {
|
||||
if a[i] != e[i] {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func withLineNumbers(content string) string {
|
||||
var buffer bytes.Buffer
|
||||
lines := strings.Split(content, "\n")
|
||||
for i, each := range lines {
|
||||
buffer.WriteString(fmt.Sprintf("%d:%s\n", i, each))
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
3
vendor/github.com/emicklei/go-restful/CHANGES.md
generated
vendored
|
@@ -1,5 +1,8 @@
|
|||
Change history of go-restful
|
||||
=
|
||||
2017-09-13
|
||||
- added route condition functions using `.If(func)` in route building.
|
||||
|
||||
2017-02-16
|
||||
- solved issue #304, make operation names unique
|
||||
|
||||
|
|
5
vendor/github.com/emicklei/go-restful/README.md
generated
vendored
|
@@ -56,7 +56,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
|
|||
- Content encoding (gzip,deflate) of request and response payloads
|
||||
- Automatic responses on OPTIONS (using a filter)
|
||||
- Automatic CORS request handling (using a filter)
|
||||
- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
|
||||
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
|
||||
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
|
||||
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
|
||||
- Configurable (trace) logging
|
||||
|
@@ -67,8 +67,9 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
|
|||
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
|
||||
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
|
||||
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
|
||||
- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
|
||||
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
|
||||
|
||||
Type ```git shortlog -s``` for a full list of contributors.
|
||||
|
||||
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
|
||||
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
|
||||
|
|
51
vendor/github.com/emicklei/go-restful/bench_curly_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,51 @@
|
|||
package restful
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func setupCurly(container *Container) []string {
|
||||
wsCount := 26
|
||||
rtCount := 26
|
||||
urisCurly := []string{}
|
||||
|
||||
container.Router(CurlyRouter{})
|
||||
for i := 0; i < wsCount; i++ {
|
||||
root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97))
|
||||
ws := new(WebService).Path(root)
|
||||
for j := 0; j < rtCount; j++ {
|
||||
sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97))
|
||||
ws.Route(ws.GET(sub).Consumes("application/xml").Produces("application/xml").To(echoCurly))
|
||||
}
|
||||
container.Add(ws)
|
||||
for _, each := range ws.Routes() {
|
||||
urisCurly = append(urisCurly, "http://bench.com"+each.Path)
|
||||
}
|
||||
}
|
||||
return urisCurly
|
||||
}
|
||||
|
||||
func echoCurly(req *Request, resp *Response) {}
|
||||
|
||||
func BenchmarkManyCurly(b *testing.B) {
|
||||
container := NewContainer()
|
||||
urisCurly := setupCurly(container)
|
||||
b.ResetTimer()
|
||||
for t := 0; t < b.N; t++ {
|
||||
for r := 0; r < 1000; r++ {
|
||||
for _, each := range urisCurly {
|
||||
sendNoReturnTo(each, container, t)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sendNoReturnTo(address string, container *Container, t int) {
|
||||
httpRequest, _ := http.NewRequest("GET", address, nil)
|
||||
httpRequest.Header.Set("Accept", "application/xml")
|
||||
httpWriter := httptest.NewRecorder()
|
||||
container.dispatch(httpWriter, httpRequest)
|
||||
}
|
43
vendor/github.com/emicklei/go-restful/bench_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,43 @@
|
|||
package restful
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var uris = []string{}
|
||||
|
||||
func setup(container *Container) {
|
||||
wsCount := 26
|
||||
rtCount := 26
|
||||
|
||||
for i := 0; i < wsCount; i++ {
|
||||
root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97))
|
||||
ws := new(WebService).Path(root)
|
||||
for j := 0; j < rtCount; j++ {
|
||||
sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97))
|
||||
ws.Route(ws.GET(sub).To(echo))
|
||||
}
|
||||
container.Add(ws)
|
||||
for _, each := range ws.Routes() {
|
||||
uris = append(uris, "http://bench.com"+each.Path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func echo(req *Request, resp *Response) {
|
||||
io.WriteString(resp.ResponseWriter, "echo")
|
||||
}
|
||||
|
||||
func BenchmarkMany(b *testing.B) {
|
||||
container := NewContainer()
|
||||
setup(container)
|
||||
b.ResetTimer()
|
||||
for t := 0; t < b.N; t++ {
|
||||
for _, each := range uris {
|
||||
// println(each)
|
||||
sendItTo(each, container)
|
||||
}
|
||||
}
|
||||
}
|
125
vendor/github.com/emicklei/go-restful/compress_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,125 @@
|
|||
package restful
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// go test -v -test.run TestGzip ...restful
|
||||
func TestGzip(t *testing.T) {
|
||||
EnableContentEncoding = true
|
||||
httpRequest, _ := http.NewRequest("GET", "/test", nil)
|
||||
httpRequest.Header.Set("Accept-Encoding", "gzip,deflate")
|
||||
httpWriter := httptest.NewRecorder()
|
||||
wanted, encoding := wantsCompressedResponse(httpRequest)
|
||||
if !wanted {
|
||||
t.Fatal("should accept gzip")
|
||||
}
|
||||
if encoding != "gzip" {
|
||||
t.Fatal("expected gzip")
|
||||
}
|
||||
c, err := NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
c.Write([]byte("Hello World"))
|
||||
c.Close()
|
||||
if httpWriter.Header().Get("Content-Encoding") != "gzip" {
|
||||
t.Fatal("Missing gzip header")
|
||||
}
|
||||
reader, err := gzip.NewReader(httpWriter.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
data, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
if got, want := string(data), "Hello World"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeflate(t *testing.T) {
|
||||
EnableContentEncoding = true
|
||||
httpRequest, _ := http.NewRequest("GET", "/test", nil)
|
||||
httpRequest.Header.Set("Accept-Encoding", "deflate,gzip")
|
||||
httpWriter := httptest.NewRecorder()
|
||||
wanted, encoding := wantsCompressedResponse(httpRequest)
|
||||
if !wanted {
|
||||
t.Fatal("should accept deflate")
|
||||
}
|
||||
if encoding != "deflate" {
|
||||
t.Fatal("expected deflate")
|
||||
}
|
||||
c, err := NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
c.Write([]byte("Hello World"))
|
||||
c.Close()
|
||||
if httpWriter.Header().Get("Content-Encoding") != "deflate" {
|
||||
t.Fatal("Missing deflate header")
|
||||
}
|
||||
reader, err := zlib.NewReader(httpWriter.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
data, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
if got, want := string(data), "Hello World"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGzipDecompressRequestBody(t *testing.T) {
|
||||
b := new(bytes.Buffer)
|
||||
w := newGzipWriter()
|
||||
w.Reset(b)
|
||||
io.WriteString(w, `{"msg":"hi"}`)
|
||||
w.Flush()
|
||||
w.Close()
|
||||
|
||||
req := new(Request)
|
||||
httpRequest, _ := http.NewRequest("GET", "/", bytes.NewReader(b.Bytes()))
|
||||
httpRequest.Header.Set("Content-Type", "application/json")
|
||||
httpRequest.Header.Set("Content-Encoding", "gzip")
|
||||
req.Request = httpRequest
|
||||
|
||||
doc := make(map[string]interface{})
|
||||
req.ReadEntity(&doc)
|
||||
|
||||
if got, want := doc["msg"], "hi"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestZlibDecompressRequestBody(t *testing.T) {
|
||||
b := new(bytes.Buffer)
|
||||
w := newZlibWriter()
|
||||
w.Reset(b)
|
||||
io.WriteString(w, `{"msg":"hi"}`)
|
||||
w.Flush()
|
||||
w.Close()
|
||||
|
||||
req := new(Request)
|
||||
httpRequest, _ := http.NewRequest("GET", "/", bytes.NewReader(b.Bytes()))
|
||||
httpRequest.Header.Set("Content-Type", "application/json")
|
||||
httpRequest.Header.Set("Content-Encoding", "deflate")
|
||||
req.Request = httpRequest
|
||||
|
||||
doc := make(map[string]interface{})
|
||||
req.ReadEntity(&doc)
|
||||
|
||||
if got, want := doc["msg"], "hi"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
8
vendor/github.com/emicklei/go-restful/compressors.go
generated
vendored
|
@@ -15,20 +15,20 @@ type CompressorProvider interface {
|
|||
// Before using it, call Reset().
|
||||
AcquireGzipWriter() *gzip.Writer
|
||||
|
||||
// Releases an aqcuired *gzip.Writer.
|
||||
// Releases an acquired *gzip.Writer.
|
||||
ReleaseGzipWriter(w *gzip.Writer)
|
||||
|
||||
// Returns a *gzip.Reader which needs to be released later.
|
||||
AcquireGzipReader() *gzip.Reader
|
||||
|
||||
// Releases an aqcuired *gzip.Reader.
|
||||
// Releases an acquired *gzip.Reader.
|
||||
ReleaseGzipReader(w *gzip.Reader)
|
||||
|
||||
// Returns a *zlib.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
AcquireZlibWriter() *zlib.Writer
|
||||
|
||||
// Releases an aqcuired *zlib.Writer.
|
||||
// Releases an acquired *zlib.Writer.
|
||||
ReleaseZlibWriter(w *zlib.Writer)
|
||||
}
|
||||
|
||||
|
@@ -45,7 +45,7 @@ func CurrentCompressorProvider() CompressorProvider {
|
|||
return currentCompressorProvider
|
||||
}
|
||||
|
||||
// CompressorProvider sets the actual provider of compressors (zlib or gzip).
|
||||
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
|
||||
func SetCompressorProvider(p CompressorProvider) {
|
||||
if p == nil {
|
||||
panic("cannot set compressor provider to nil")
|
||||
|
|
2
vendor/github.com/emicklei/go-restful/container.go
generated
vendored
|
@@ -140,7 +140,7 @@ func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) boo
|
|||
func (c *Container) Remove(ws *WebService) error {
|
||||
if c.ServeMux == http.DefaultServeMux {
|
||||
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
|
||||
log.Printf(errMsg)
|
||||
log.Print(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
c.webServicesLock.Lock()
|
||||
|
|
83
vendor/github.com/emicklei/go-restful/container_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,83 @@
|
|||
package restful
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// go test -v -test.run TestContainer_computeAllowedMethods ...restful
|
||||
func TestContainer_computeAllowedMethods(t *testing.T) {
|
||||
wc := NewContainer()
|
||||
ws1 := new(WebService).Path("/users")
|
||||
ws1.Route(ws1.GET("{i}").To(dummy))
|
||||
ws1.Route(ws1.POST("{i}").To(dummy))
|
||||
wc.Add(ws1)
|
||||
httpRequest, _ := http.NewRequest("GET", "http://api.his.com/users/1", nil)
|
||||
rreq := Request{Request: httpRequest}
|
||||
m := wc.computeAllowedMethods(&rreq)
|
||||
if len(m) != 2 {
|
||||
t.Errorf("got %d expected 2 methods, %v", len(m), m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainer_HandleWithFilter(t *testing.T) {
|
||||
prefilterCalled := false
|
||||
postfilterCalled := false
|
||||
httpHandlerCalled := false
|
||||
|
||||
wc := NewContainer()
|
||||
wc.Filter(func(request *Request, response *Response, chain *FilterChain) {
|
||||
prefilterCalled = true
|
||||
chain.ProcessFilter(request, response)
|
||||
})
|
||||
wc.HandleWithFilter("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
httpHandlerCalled = true
|
||||
w.Write([]byte("ok"))
|
||||
}))
|
||||
wc.Filter(func(request *Request, response *Response, chain *FilterChain) {
|
||||
postfilterCalled = true
|
||||
chain.ProcessFilter(request, response)
|
||||
})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
request, _ := http.NewRequest("GET", "/", nil)
|
||||
wc.ServeHTTP(recorder, request)
|
||||
if recorder.Code != http.StatusOK {
|
||||
t.Errorf("unexpected code %d", recorder.Code)
|
||||
}
|
||||
if recorder.Body.String() != "ok" {
|
||||
t.Errorf("unexpected body %s", recorder.Body.String())
|
||||
}
|
||||
if !prefilterCalled {
|
||||
t.Errorf("filter added before calling HandleWithFilter wasn't called")
|
||||
}
|
||||
if !postfilterCalled {
|
||||
t.Errorf("filter added after calling HandleWithFilter wasn't called")
|
||||
}
|
||||
if !httpHandlerCalled {
|
||||
t.Errorf("handler added by calling HandleWithFilter wasn't called")
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerAddAndRemove(t *testing.T) {
|
||||
ws1 := new(WebService).Path("/")
|
||||
ws2 := new(WebService).Path("/users")
|
||||
wc := NewContainer()
|
||||
wc.Add(ws1)
|
||||
wc.Add(ws2)
|
||||
wc.Remove(ws2)
|
||||
if len(wc.webServices) != 1 {
|
||||
t.Errorf("expected one webservices")
|
||||
}
|
||||
if !wc.isRegisteredOnRoot {
|
||||
t.Errorf("expected on root registered")
|
||||
}
|
||||
wc.Remove(ws1)
|
||||
if len(wc.webServices) > 0 {
|
||||
t.Errorf("expected zero webservices")
|
||||
}
|
||||
if wc.isRegisteredOnRoot {
|
||||
t.Errorf("expected not on root registered")
|
||||
}
|
||||
}
|
129
vendor/github.com/emicklei/go-restful/cors_filter_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,129 @@
|
|||
package restful
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// go test -v -test.run TestCORSFilter_Preflight ...restful
|
||||
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
|
||||
func TestCORSFilter_Preflight(t *testing.T) {
|
||||
tearDown()
|
||||
ws := new(WebService)
|
||||
ws.Route(ws.PUT("/cors").To(dummy))
|
||||
Add(ws)
|
||||
|
||||
cors := CrossOriginResourceSharing{
|
||||
ExposeHeaders: []string{"X-Custom-Header"},
|
||||
AllowedHeaders: []string{"X-Custom-Header", "X-Additional-Header"},
|
||||
CookiesAllowed: true,
|
||||
Container: DefaultContainer}
|
||||
Filter(cors.Filter)
|
||||
|
||||
// Preflight
|
||||
httpRequest, _ := http.NewRequest("OPTIONS", "http://api.alice.com/cors", nil)
|
||||
httpRequest.Method = "OPTIONS"
|
||||
httpRequest.Header.Set(HEADER_Origin, "http://api.bob.com")
|
||||
httpRequest.Header.Set(HEADER_AccessControlRequestMethod, "PUT")
|
||||
httpRequest.Header.Set(HEADER_AccessControlRequestHeaders, "X-Custom-Header, X-Additional-Header")
|
||||
|
||||
httpWriter := httptest.NewRecorder()
|
||||
DefaultContainer.Dispatch(httpWriter, httpRequest)
|
||||
|
||||
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
|
||||
if "http://api.bob.com" != actual {
|
||||
t.Fatal("expected: http://api.bob.com but got:" + actual)
|
||||
}
|
||||
actual = httpWriter.Header().Get(HEADER_AccessControlAllowMethods)
|
||||
if "PUT" != actual {
|
||||
t.Fatal("expected: PUT but got:" + actual)
|
||||
}
|
||||
actual = httpWriter.Header().Get(HEADER_AccessControlAllowHeaders)
|
||||
if "X-Custom-Header, X-Additional-Header" != actual {
|
||||
t.Fatal("expected: X-Custom-Header, X-Additional-Header but got:" + actual)
|
||||
}
|
||||
|
||||
if !cors.isOriginAllowed("somewhere") {
|
||||
t.Fatal("origin expected to be allowed")
|
||||
}
|
||||
cors.AllowedDomains = []string{"overthere.com"}
|
||||
if cors.isOriginAllowed("somewhere") {
|
||||
t.Fatal("origin [somewhere] expected NOT to be allowed")
|
||||
}
|
||||
if !cors.isOriginAllowed("overthere.com") {
|
||||
t.Fatal("origin [overthere] expected to be allowed")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// go test -v -test.run TestCORSFilter_Actual ...restful
|
||||
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
|
||||
func TestCORSFilter_Actual(t *testing.T) {
|
||||
tearDown()
|
||||
ws := new(WebService)
|
||||
ws.Route(ws.PUT("/cors").To(dummy))
|
||||
Add(ws)
|
||||
|
||||
cors := CrossOriginResourceSharing{
|
||||
ExposeHeaders: []string{"X-Custom-Header"},
|
||||
AllowedHeaders: []string{"X-Custom-Header", "X-Additional-Header"},
|
||||
CookiesAllowed: true,
|
||||
Container: DefaultContainer}
|
||||
Filter(cors.Filter)
|
||||
|
||||
// Actual
|
||||
httpRequest, _ := http.NewRequest("PUT", "http://api.alice.com/cors", nil)
|
||||
httpRequest.Header.Set(HEADER_Origin, "http://api.bob.com")
|
||||
httpRequest.Header.Set("X-Custom-Header", "value")
|
||||
|
||||
httpWriter := httptest.NewRecorder()
|
||||
DefaultContainer.Dispatch(httpWriter, httpRequest)
|
||||
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
|
||||
if "http://api.bob.com" != actual {
|
||||
t.Fatal("expected: http://api.bob.com but got:" + actual)
|
||||
}
|
||||
if httpWriter.Body.String() != "dummy" {
|
||||
t.Fatal("expected: dummy but got:" + httpWriter.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
var allowedDomainInput = []struct {
|
||||
domains []string
|
||||
origin string
|
||||
allowed bool
|
||||
}{
|
||||
{[]string{}, "http://anything.com", true},
|
||||
{[]string{"example.com"}, "example.com", true},
|
||||
{[]string{"example.com"}, "not-allowed", false},
|
||||
{[]string{"not-matching.com", "example.com"}, "example.com", true},
|
||||
{[]string{".*"}, "example.com", true},
|
||||
}
|
||||
|
||||
// go test -v -test.run TestCORSFilter_AllowedDomains ...restful
|
||||
func TestCORSFilter_AllowedDomains(t *testing.T) {
|
||||
for _, each := range allowedDomainInput {
|
||||
tearDown()
|
||||
ws := new(WebService)
|
||||
ws.Route(ws.PUT("/cors").To(dummy))
|
||||
Add(ws)
|
||||
|
||||
cors := CrossOriginResourceSharing{
|
||||
AllowedDomains: each.domains,
|
||||
CookiesAllowed: true,
|
||||
Container: DefaultContainer}
|
||||
Filter(cors.Filter)
|
||||
|
||||
httpRequest, _ := http.NewRequest("PUT", "http://api.his.com/cors", nil)
|
||||
httpRequest.Header.Set(HEADER_Origin, each.origin)
|
||||
httpWriter := httptest.NewRecorder()
|
||||
DefaultContainer.Dispatch(httpWriter, httpRequest)
|
||||
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
|
||||
if actual != each.origin && each.allowed {
|
||||
t.Fatal("expected to be accepted")
|
||||
}
|
||||
if actual == each.origin && !each.allowed {
|
||||
t.Fatal("did not expect to be accepted")
|
||||
}
|
||||
}
|
||||
}
|
231
vendor/github.com/emicklei/go-restful/curly_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,231 @@
|
|||
package restful
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var requestPaths = []struct {
|
||||
// url with path (1) is handled by service with root (2) and remainder has value final (3)
|
||||
path, root string
|
||||
}{
|
||||
{"/", "/"},
|
||||
{"/p", "/p"},
|
||||
{"/p/x", "/p/{q}"},
|
||||
{"/q/x", "/q"},
|
||||
{"/p/x/", "/p/{q}"},
|
||||
{"/p/x/y", "/p/{q}"},
|
||||
{"/q/x/y", "/q"},
|
||||
{"/z/q", "/{p}/q"},
|
||||
{"/a/b/c/q", "/"},
|
||||
}
|
||||
|
||||
// go test -v -test.run TestCurlyDetectWebService ...restful
|
||||
func TestCurlyDetectWebService(t *testing.T) {
|
||||
ws1 := new(WebService).Path("/")
|
||||
ws2 := new(WebService).Path("/p")
|
||||
ws3 := new(WebService).Path("/q")
|
||||
ws4 := new(WebService).Path("/p/q")
|
||||
ws5 := new(WebService).Path("/p/{q}")
|
||||
ws7 := new(WebService).Path("/{p}/q")
|
||||
var wss = []*WebService{ws1, ws2, ws3, ws4, ws5, ws7}
|
||||
|
||||
for _, each := range wss {
|
||||
t.Logf("path=%s,toks=%v\n", each.pathExpr.Source, each.pathExpr.tokens)
|
||||
}
|
||||
|
||||
router := CurlyRouter{}
|
||||
|
||||
ok := true
|
||||
for i, fixture := range requestPaths {
|
||||
requestTokens := tokenizePath(fixture.path)
|
||||
who := router.detectWebService(requestTokens, wss)
|
||||
if who != nil && who.RootPath() != fixture.root {
|
||||
t.Logf("[line:%v] Unexpected dispatcher, expected:%v, actual:%v", i, fixture.root, who.RootPath())
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
var serviceDetects = []struct {
|
||||
path string
|
||||
found bool
|
||||
root string
|
||||
}{
|
||||
{"/a/b", true, "/{p}/{q}/{r}"},
|
||||
{"/p/q", true, "/p/q"},
|
||||
{"/q/p", true, "/q"},
|
||||
{"/", true, "/"},
|
||||
{"/p/q/r", true, "/p/q"},
|
||||
}
|
||||
|
||||
// go test -v -test.run Test_detectWebService ...restful
|
||||
func Test_detectWebService(t *testing.T) {
|
||||
router := CurlyRouter{}
|
||||
ws1 := new(WebService).Path("/")
|
||||
ws2 := new(WebService).Path("/p")
|
||||
ws3 := new(WebService).Path("/q")
|
||||
ws4 := new(WebService).Path("/p/q")
|
||||
ws5 := new(WebService).Path("/p/{q}")
|
||||
ws6 := new(WebService).Path("/p/{q}/")
|
||||
ws7 := new(WebService).Path("/{p}/q")
|
||||
ws8 := new(WebService).Path("/{p}/{q}/{r}")
|
||||
var wss = []*WebService{ws8, ws7, ws6, ws5, ws4, ws3, ws2, ws1}
|
||||
for _, fix := range serviceDetects {
|
||||
requestPath := fix.path
|
||||
requestTokens := tokenizePath(requestPath)
|
||||
for _, ws := range wss {
|
||||
serviceTokens := ws.pathExpr.tokens
|
||||
matches, score := router.computeWebserviceScore(requestTokens, serviceTokens)
|
||||
t.Logf("req=%s,toks:%v,ws=%s,toks:%v,score=%d,matches=%v", requestPath, requestTokens, ws.RootPath(), serviceTokens, score, matches)
|
||||
}
|
||||
best := router.detectWebService(requestTokens, wss)
|
||||
if best != nil {
|
||||
if fix.found {
|
||||
t.Logf("best=%s", best.RootPath())
|
||||
} else {
|
||||
t.Fatalf("should have found:%s", fix.root)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var routeMatchers = []struct {
|
||||
route string
|
||||
path string
|
||||
matches bool
|
||||
paramCount int
|
||||
staticCount int
|
||||
}{
|
||||
// route, request-path
|
||||
{"/a", "/a", true, 0, 1},
|
||||
{"/a", "/b", false, 0, 0},
|
||||
{"/a", "/b", false, 0, 0},
|
||||
{"/a/{b}/c/", "/a/2/c", true, 1, 2},
|
||||
{"/{a}/{b}/{c}/", "/a/b", false, 0, 0},
|
||||
{"/{x:*}", "/", false, 0, 0},
|
||||
{"/{x:*}", "/a", true, 1, 0},
|
||||
{"/{x:*}", "/a/b", true, 1, 0},
|
||||
{"/a/{x:*}", "/a/b", true, 1, 1},
|
||||
{"/a/{x:[A-Z][A-Z]}", "/a/ZX", true, 1, 1},
|
||||
{"/basepath/{resource:*}", "/basepath/some/other/location/test.xml", true, 1, 1},
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run Test_matchesRouteByPathTokens ...restful
|
||||
func Test_matchesRouteByPathTokens(t *testing.T) {
|
||||
router := CurlyRouter{}
|
||||
for i, each := range routeMatchers {
|
||||
routeToks := tokenizePath(each.route)
|
||||
reqToks := tokenizePath(each.path)
|
||||
matches, pCount, sCount := router.matchesRouteByPathTokens(routeToks, reqToks)
|
||||
if matches != each.matches {
|
||||
t.Fatalf("[%d] unexpected matches outcome route:%s, path:%s, matches:%v", i, each.route, each.path, matches)
|
||||
}
|
||||
if pCount != each.paramCount {
|
||||
t.Fatalf("[%d] unexpected paramCount got:%d want:%d ", i, pCount, each.paramCount)
|
||||
}
|
||||
if sCount != each.staticCount {
|
||||
t.Fatalf("[%d] unexpected staticCount got:%d want:%d ", i, sCount, each.staticCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run TestExtractParameters_Wildcard1 ...restful
|
||||
func TestExtractParameters_Wildcard1(t *testing.T) {
|
||||
params := doExtractParams("/fixed/{var:*}", 2, "/fixed/remainder", t)
|
||||
if params["var"] != "remainder" {
|
||||
t.Errorf("parameter mismatch var: %s", params["var"])
|
||||
}
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run TestExtractParameters_Wildcard2 ...restful
|
||||
func TestExtractParameters_Wildcard2(t *testing.T) {
|
||||
params := doExtractParams("/fixed/{var:*}", 2, "/fixed/remain/der", t)
|
||||
if params["var"] != "remain/der" {
|
||||
t.Errorf("parameter mismatch var: %s", params["var"])
|
||||
}
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run TestExtractParameters_Wildcard3 ...restful
|
||||
func TestExtractParameters_Wildcard3(t *testing.T) {
|
||||
params := doExtractParams("/static/{var:*}", 2, "/static/test/sub/hi.html", t)
|
||||
if params["var"] != "test/sub/hi.html" {
|
||||
t.Errorf("parameter mismatch var: %s", params["var"])
|
||||
}
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run TestCurly_ISSUE_34 ...restful
|
||||
func TestCurly_ISSUE_34(t *testing.T) {
|
||||
ws1 := new(WebService).Path("/")
|
||||
ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
|
||||
ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
|
||||
croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
|
||||
if len(croutes) != 2 {
|
||||
t.Fatal("expected 2 routes")
|
||||
}
|
||||
if got, want := croutes[0].route.Path, "/network/{id}"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run TestCurly_ISSUE_34_2 ...restful
|
||||
func TestCurly_ISSUE_34_2(t *testing.T) {
|
||||
ws1 := new(WebService)
|
||||
ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
|
||||
ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
|
||||
croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
|
||||
if len(croutes) != 2 {
|
||||
t.Fatal("expected 2 routes")
|
||||
}
|
||||
if got, want := croutes[0].route.Path, "/network/{id}"; got != want {
|
||||
t.Errorf("got %v want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// clear && go test -v -test.run TestCurly_JsonHtml ...restful
|
||||
func TestCurly_JsonHtml(t *testing.T) {
|
||||
ws1 := new(WebService)
|
||||
ws1.Path("/")
|
||||
ws1.Route(ws1.GET("/some.html").To(curlyDummy).Consumes("*/*").Produces("text/html"))
|
||||
req, _ := http.NewRequest("GET", "/some.html", nil)
|
||||
req.Header.Set("Accept", "application/json")
|
||||
_, route, err := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req)
|
||||
if err == nil {
|
||||
t.Error("error expected")
|
||||
}
|
||||
if route != nil {
|
||||
t.Error("no route expected")
|
||||
}
|
||||
}
|
||||
|
||||
// go test -v -test.run TestCurly_ISSUE_137 ...restful
|
||||
func TestCurly_ISSUE_137(t *testing.T) {
|
||||
ws1 := new(WebService)
|
||||
ws1.Route(ws1.GET("/hello").To(curlyDummy))
|
||||
ws1.Path("/")
|
||||
req, _ := http.NewRequest("GET", "/", nil)
|
||||
_, route, _ := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req)
|
||||
t.Log(route)
|
||||
if route != nil {
|
||||
t.Error("no route expected")
|
||||
}
|
||||
}
|
||||
|
||||
// go test -v -test.run TestCurly_ISSUE_137_2 ...restful
|
||||
func TestCurly_ISSUE_137_2(t *testing.T) {
|
||||
ws1 := new(WebService)
|
||||
ws1.Route(ws1.GET("/hello").To(curlyDummy))
|
||||
ws1.Path("/")
|
||||
req, _ := http.NewRequest("GET", "/hello/bob", nil)
|
||||
_, route, _ := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req)
|
||||
t.Log(route)
|
||||
if route != nil {
|
||||
t.Errorf("no route expected, got %v", route)
|
||||
}
|
||||
}
|
||||
|
||||
func curlyDummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "curlyDummy") }
|
41
vendor/github.com/emicklei/go-restful/doc_examples_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,41 @@
|
|||
package restful
|
||||
|
||||
import "net/http"
|
||||
|
||||
func ExampleOPTIONSFilter() {
|
||||
// Install the OPTIONS filter on the default Container
|
||||
Filter(OPTIONSFilter())
|
||||
}
|
||||
func ExampleContainer_OPTIONSFilter() {
|
||||
// Install the OPTIONS filter on a Container
|
||||
myContainer := new(Container)
|
||||
myContainer.Filter(myContainer.OPTIONSFilter)
|
||||
}
|
||||
|
||||
func ExampleContainer() {
|
||||
// The Default container of go-restful uses the http.DefaultServeMux.
|
||||
// You can create your own Container using restful.NewContainer() and create a new http.Server for that particular container
|
||||
|
||||
ws := new(WebService)
|
||||
wsContainer := NewContainer()
|
||||
wsContainer.Add(ws)
|
||||
server := &http.Server{Addr: ":8080", Handler: wsContainer}
|
||||
server.ListenAndServe()
|
||||
}
|
||||
|
||||
func ExampleCrossOriginResourceSharing() {
|
||||
// To install this filter on the Default Container use:
|
||||
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
|
||||
Filter(cors.Filter)
|
||||
}
|
||||
|
||||
func ExampleServiceError() {
|
||||
resp := new(Response)
|
||||
resp.WriteEntity(NewError(http.StatusBadRequest, "Non-integer {id} path parameter"))
|
||||
}
|
||||
|
||||
func ExampleBoundedCachedCompressors() {
|
||||
// Register a compressor provider (gzip/deflate read/write) that uses
|
||||
// a bounded cache with a maximum of 20 writers and 20 readers.
|
||||
SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
|
||||
}
|
69
vendor/github.com/emicklei/go-restful/entity_accessors_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,69 @@
|
|||
package restful
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type keyvalue struct {
|
||||
readCalled bool
|
||||
writeCalled bool
|
||||
}
|
||||
|
||||
func (kv *keyvalue) Read(req *Request, v interface{}) error {
|
||||
//t := reflect.TypeOf(v)
|
||||
//rv := reflect.ValueOf(v)
|
||||
kv.readCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kv *keyvalue) Write(resp *Response, status int, v interface{}) error {
|
||||
t := reflect.TypeOf(v)
|
||||
rv := reflect.ValueOf(v)
|
||||
for ix := 0; ix < t.NumField(); ix++ {
|
||||
sf := t.Field(ix)
|
||||
io.WriteString(resp, sf.Name)
|
||||
io.WriteString(resp, "=")
|
||||
io.WriteString(resp, fmt.Sprintf("%v\n", rv.Field(ix).Interface()))
|
||||
}
|
||||
kv.writeCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// go test -v -test.run TestKeyValueEncoding ...restful
|
||||
func TestKeyValueEncoding(t *testing.T) {
|
||||
type Book struct {
|
||||
Title string
|
||||
Author string
|
||||
PublishedYear int
|
||||
}
|
||||
kv := new(keyvalue)
|
||||
RegisterEntityAccessor("application/kv", kv)
|
||||
b := Book{"Singing for Dummies", "john doe", 2015}
|
||||
|
||||
// Write
|
||||
httpWriter := httptest.NewRecorder()
|
||||
// Accept Produces
|
||||
resp := Response{ResponseWriter: httpWriter, requestAccept: "application/kv,*/*;q=0.8", routeProduces: []string{"application/kv"}, prettyPrint: true}
|
||||
resp.WriteEntity(b)
|
||||
t.Log(string(httpWriter.Body.Bytes()))
|
||||
if !kv.writeCalled {
|
||||
t.Error("Write never called")
|
||||
}
|
||||
|
||||
// Read
|
||||
bodyReader := bytes.NewReader(httpWriter.Body.Bytes())
|
||||
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
|
||||
httpRequest.Header.Set("Content-Type", "application/kv; charset=UTF-8")
|
||||
request := NewRequest(httpRequest)
|
||||
var bb Book
|
||||
request.ReadEntity(&bb)
|
||||
if !kv.readCalled {
|
||||
t.Error("Read never called")
|
||||
}
|
||||
}
|
1
vendor/github.com/emicklei/go-restful/examples/.goconvey
generated
vendored
Normal file
|
@@ -0,0 +1 @@
|
|||
ignore
|
1
vendor/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey
generated
vendored
Normal file
|
@@ -0,0 +1 @@
|
|||
ignore
|
20
vendor/github.com/emicklei/go-restful/examples/google_app_engine/app.yaml
generated
vendored
Normal file
|
@@ -0,0 +1,20 @@
|
|||
#
|
||||
# Include your application ID here
|
||||
#
|
||||
application: <your_app_id>
|
||||
version: 1
|
||||
runtime: go
|
||||
api_version: go1
|
||||
|
||||
handlers:
|
||||
#
|
||||
# Regex for all swagger files to make as static content.
|
||||
# You should create the folder static/swagger and copy
|
||||
# swagger-ui into it.
|
||||
#
|
||||
- url: /apidocs/(.*?)/(.*\.(js|html|css))
|
||||
static_files: static/swagger/\1/\2
|
||||
upload: static/swagger/(.*?)/(.*\.(js|html|css))
|
||||
|
||||
- url: /.*
|
||||
script: _go_app
|