Merge pull request #3943 from aledbf/update-dependencies

Update dependencies

Commit 1ed1dd588a: 1359 changed files with 128485 additions and 32660 deletions
Gopkg.lock (471 lines changed, generated)
File diff suppressed because it is too large.

Gopkg.toml (30 lines changed)
@@ -20,7 +20,7 @@
 # name = "github.com/x/y"
 # version = "2.4.0"

-ignored = ["github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller*"]
+ignored = ["k8s.io/ingress-nginx/images/*"]

 [prune]
 non-go = true

@@ -33,18 +33,10 @@ ignored = ["github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller*"]
 unused-packages = false
 non-go = false

 [[override]]
 name = "github.com/docker/distribution"
 revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"

 [[override]]
 name = "gopkg.in/fsnotify.v1"
 source = "https://github.com/fsnotify/fsnotify.git"

 [[override]]
 name = "github.com/golang/glog"
 source = "k8s.io/klog/glog"

 [[constraint]]
 name = "github.com/eapache/channels"
 branch = "master"

@@ -99,28 +91,28 @@ ignored = ["github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller*"]

 [[constraint]]
 name = "k8s.io/kubernetes"
-revision = "v1.13.3"
+revision = "v1.14.0"

 [[constraint]]
 name = "k8s.io/api"
-revision = "kubernetes-1.13.3"
+revision = "kubernetes-1.14.0"

 [[constraint]]
 name = "k8s.io/apimachinery"
-revision = "kubernetes-1.13.3"
+revision = "kubernetes-1.14.0"

 [[constraint]]
 name = "k8s.io/client-go"
-revision = "kubernetes-1.13.3"
+revision = "release-11.0"

 [[constraint]]
 name = "k8s.io/apiextensions-apiserver"
 revision = "kubernetes-1.13.3"

 [[constraint]]
 name = "k8s.io/apiserver"
-revision = "kubernetes-1.13.3"
+revision = "kubernetes-1.14.0"

 [[constraint]]
 name = "k8s.io/cli-runtime"
-revision = "kubernetes-1.13.3"
+revision = "kubernetes-1.14.0"

 [[constraint]]
 name = "k8s.io/apiserver"
 revision = "kubernetes-1.14.0"
@@ -47,7 +47,7 @@ func main() {
	}

	// Respect some basic kubectl flags like --namespace
-	flags := genericclioptions.NewConfigFlags()
+	flags := genericclioptions.NewConfigFlags(true)
	flags.AddFlags(rootCmd.PersistentFlags())

	rootCmd.AddCommand(ingresses.CreateCommand(flags))
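The constructor change above tracks cli-runtime in Kubernetes 1.14, where genericclioptions.NewConfigFlags gained a boolean argument (usePersistentConfig). A minimal sketch of such a call site, with an illustrative command name that is not taken from the PR:

package main

import (
	"github.com/spf13/cobra"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	rootCmd := &cobra.Command{Use: "example-plugin"} // illustrative name, not from the PR

	// true asks the ConfigFlags to compute the client config once and reuse it
	// instead of rebuilding it on every call.
	flags := genericclioptions.NewConfigFlags(true)
	flags.AddFlags(rootCmd.PersistentFlags())

	_ = rootCmd.Execute()
}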
@@ -245,7 +245,7 @@ func TestStore(t *testing.T) {
	// Secret takes a bit to update
	time.Sleep(3 * time.Second)

-	err = clientSet.Extensions().Ingresses(ni.Namespace).Delete(ni.Name, &metav1.DeleteOptions{})
+	err = clientSet.ExtensionsV1beta1().Ingresses(ni.Namespace).Delete(ni.Name, &metav1.DeleteOptions{})
	if err != nil {
		t.Errorf("error creating ingress: %v", err)
	}
@@ -803,13 +803,13 @@ func deleteConfigMap(cm, ns string, clientSet kubernetes.Interface, t *testing.T

func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) *extensions.Ingress {
	t.Helper()
-	ing, err := clientSet.Extensions().Ingresses(ingress.Namespace).Update(ingress)
+	ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress)

	if err != nil {
		if k8sErrors.IsNotFound(err) {
			t.Logf("Ingress %v not found, creating", ingress)

-			ing, err = clientSet.Extensions().Ingresses(ingress.Namespace).Create(ingress)
+			ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress)
			if err != nil {
				t.Fatalf("error creating ingress %+v: %v", ingress, err)
			}

@@ -828,7 +828,7 @@ func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface,

func deleteIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) {
	t.Helper()
-	err := clientSet.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{})
+	err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{})

	if err != nil {
		t.Errorf("failed to delete ingress %+v: %v", ingress, err)
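The calls above switch from the removed generic Extensions() accessor to the explicitly versioned ExtensionsV1beta1() group client that the newer client-go generates. A minimal sketch, assuming a standard clientset built from a kubeconfig (the function name and parameters are illustrative, not from the PR):

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func listIngresses(kubeconfig, namespace string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	clientSet, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// The group/version is now spelled out; the old Extensions() shortcut is gone.
	ingresses, err := clientSet.ExtensionsV1beta1().Ingresses(namespace).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, ing := range ingresses.Items {
		fmt.Println(ing.Name)
	}
	return nil
}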
@@ -18,9 +18,17 @@ package ssl

import (
	"bytes"
	"crypto"
	"crypto/rand"
	cryptorand "crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"errors"
	"fmt"
	"math"
	"math/big"
	"testing"
	"time"

@@ -36,7 +44,7 @@ func generateRSACerts(host string) (*keyPair, *keyPair, error) {
		return nil, nil, err
	}

-	key, err := certutil.NewPrivateKey()
+	key, err := newPrivateKey()
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create a server private key: %v", err)
	}

@@ -45,7 +53,7 @@ func generateRSACerts(host string) (*keyPair, *keyPair, error) {
		CommonName: host,
		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
	}
-	cert, err := certutil.NewSignedCert(config, key, ca.Cert, ca.Key)
+	cert, err := newSignedCert(config, key, ca.Cert, ca.Key)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to sign the server certificate: %v", err)
	}

@@ -66,8 +74,8 @@ func TestStoreSSLCertOnDisk(t *testing.T) {

	name := fmt.Sprintf("test-%v", time.Now().UnixNano())

-	c := certutil.EncodeCertPEM(cert.Cert)
-	k := certutil.EncodePrivateKeyPEM(cert.Key)
+	c := encodeCertPEM(cert.Cert)
+	k := encodePrivateKeyPEM(cert.Key)

	sslCert, err := CreateSSLCert(c, k)
	if err != nil {

@@ -102,9 +110,9 @@ func TestCACert(t *testing.T) {

	name := fmt.Sprintf("test-%v", time.Now().UnixNano())

-	c := certutil.EncodeCertPEM(cert.Cert)
-	k := certutil.EncodePrivateKeyPEM(cert.Key)
-	ca := certutil.EncodeCertPEM(CA.Cert)
+	c := encodeCertPEM(cert.Cert)
+	k := encodePrivateKeyPEM(cert.Key)
+	ca := encodeCertPEM(CA.Cert)

	sslCert, err := CreateSSLCert(c, k)
	if err != nil {

@@ -151,7 +159,7 @@ func TestConfigureCACert(t *testing.T) {
	if err != nil {
		t.Fatalf("unexpected error creating SSL certificate: %v", err)
	}
-	c := certutil.EncodeCertPEM(ca.Cert)
+	c := encodeCertPEM(ca.Cert)

	sslCert, err := CreateCACert(c)
	if err != nil {

@@ -187,8 +195,8 @@ func TestCreateSSLCert(t *testing.T) {
		t.Fatalf("unexpected error creating SSL certificate: %v", err)
	}

-	c := certutil.EncodeCertPEM(cert.Cert)
-	k := certutil.EncodePrivateKeyPEM(cert.Key)
+	c := encodeCertPEM(cert.Cert)
+	k := encodePrivateKeyPEM(cert.Key)

	sslCert, err := CreateSSLCert(c, k)
	if err != nil {

@@ -219,7 +227,7 @@ type keyPair struct {
}

func newCA(name string) (*keyPair, error) {
-	key, err := certutil.NewPrivateKey()
+	key, err := newPrivateKey()
	if err != nil {
		return nil, fmt.Errorf("unable to create a private key for a new CA: %v", err)
	}

@@ -271,3 +279,77 @@ func TestIsValidHostname(t *testing.T) {
		}
	}
}

const (
	duration365d = time.Hour * 24 * 365
	rsaKeySize   = 2048
)

// newPrivateKey creates an RSA private key
func newPrivateKey() (*rsa.PrivateKey, error) {
	return rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
}

// newSignedCert creates a signed certificate using the given CA certificate and key
func newSignedCert(cfg certutil.Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) {
	serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	if len(cfg.CommonName) == 0 {
		return nil, errors.New("must specify a CommonName")
	}
	if len(cfg.Usages) == 0 {
		return nil, errors.New("must specify at least one ExtKeyUsage")
	}

	certTmpl := x509.Certificate{
		Subject: pkix.Name{
			CommonName:   cfg.CommonName,
			Organization: cfg.Organization,
		},
		DNSNames:     cfg.AltNames.DNSNames,
		IPAddresses:  cfg.AltNames.IPs,
		SerialNumber: serial,
		NotBefore:    caCert.NotBefore,
		NotAfter:     time.Now().Add(duration365d).UTC(),
		KeyUsage:     x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  cfg.Usages,
	}
	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
	if err != nil {
		return nil, err
	}
	return x509.ParseCertificate(certDERBytes)
}

// encodePublicKeyPEM returns PEM-encoded public data
func encodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
	der, err := x509.MarshalPKIXPublicKey(key)
	if err != nil {
		return []byte{}, err
	}
	block := pem.Block{
		Type:  "PUBLIC KEY",
		Bytes: der,
	}
	return pem.EncodeToMemory(&block), nil
}

// encodePrivateKeyPEM returns PEM-encoded private key data
func encodePrivateKeyPEM(key *rsa.PrivateKey) []byte {
	block := pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	}
	return pem.EncodeToMemory(&block)
}

// encodeCertPEM returns PEM-endcoded certificate data
func encodeCertPEM(cert *x509.Certificate) []byte {
	block := pem.Block{
		Type:  certutil.CertificateBlockType,
		Bytes: cert.Raw,
	}
	return pem.EncodeToMemory(&block)
}
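The helpers above replace certificate utilities that were dropped from k8s.io/client-go/util/cert. A small usage sketch (hypothetical, assumed to live in the same test package): build a self-signed CA with the still-available certutil.NewSelfSignedCACert and PEM-encode the pair with the new local helpers.

func exampleSelfSignedPair() (certPEM, keyPEM []byte, err error) {
	key, err := newPrivateKey()
	if err != nil {
		return nil, nil, err
	}
	// NewSelfSignedCACert is still provided by k8s.io/client-go/util/cert.
	cert, err := certutil.NewSelfSignedCACert(certutil.Config{CommonName: "example-ca"}, key)
	if err != nil {
		return nil, nil, err
	}
	// encodeCertPEM and encodePrivateKeyPEM are the helpers introduced above.
	return encodeCertPEM(cert), encodePrivateKeyPEM(key), nil
}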
@@ -23,7 +23,7 @@ import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/gomega"
-	"k8s.io/apiserver/pkg/util/logs"
+	"k8s.io/component-base/logs"

	// required
	_ "k8s.io/client-go/plugin/pkg/client/auth"
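The import swap above follows the move of the logs helper package to k8s.io/component-base in Kubernetes 1.14; the call sites stay the same. A minimal sketch (the entrypoint body is illustrative, not copied from the PR):

package main

import "k8s.io/component-base/logs"

func main() {
	logs.InitLogs()        // same API as the old k8s.io/apiserver/pkg/util/logs package
	defer logs.FlushLogs() // flush buffered klog output on exit

	// ... register and run the e2e suite here ...
}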
@@ -19,8 +19,8 @@ package framework
import (
	. "github.com/onsi/gomega"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

@@ -68,12 +68,12 @@ func (f *Framework) NewDeployment(name, image string, port int32, replicas int32
		},
	}

-	deployment := &extensions.Deployment{
+	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: f.Namespace,
		},
-		Spec: extensions.DeploymentSpec{
+		Spec: appsv1.DeploymentSpec{
			Replicas: NewInt32(replicas),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
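The framework now builds Deployments against apps/v1 rather than the deprecated extensions/v1beta1 group. A minimal sketch of such an object with illustrative names (not taken from the PR); note that apps/v1 requires Spec.Selector and it must match the pod template labels.

package framework

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newEchoDeployment is a hypothetical helper, not part of the PR.
func newEchoDeployment(namespace string, replicas int32) *appsv1.Deployment {
	labels := map[string]string{"app": "echo"}
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "echo", Namespace: namespace},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			// apps/v1 requires Selector, and it must match the template labels.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{Name: "echo", Image: "k8s.gcr.io/echoserver:1.10"},
					},
				},
			},
		},
	}
}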
@@ -19,8 +19,8 @@ package framework
import (
	. "github.com/onsi/gomega"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/intstr"

@@ -35,12 +35,12 @@ func (f *Framework) NewGRPCFortuneTellerDeployment() {
// NewNewGRPCFortuneTellerDeploymentWithReplicas creates a new deployment of the
// fortune teller image in a particular namespace. Number of replicas is configurable
func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32) {
-	deployment := &extensions.Deployment{
+	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "fortune-teller",
			Namespace: f.Namespace,
		},
-		Spec: extensions.DeploymentSpec{
+		Spec: appsv1.DeploymentSpec{
			Replicas: NewInt32(replicas),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
@@ -19,8 +19,8 @@ package framework
import (
	. "github.com/onsi/gomega"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

@@ -73,12 +73,12 @@ func (f *Framework) NewInfluxDBDeployment() {

	Expect(cm).NotTo(BeNil(), "expected a configmap but none returned")

-	deployment := &extensions.Deployment{
+	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "influxdb-svc",
			Namespace: f.Namespace,
		},
-		Spec: extensions.DeploymentSpec{
+		Spec: appsv1.DeploymentSpec{
			Replicas: NewInt32(1),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
@@ -23,6 +23,7 @@ import (

	. "github.com/onsi/gomega"

+	appsv1 "k8s.io/api/apps/v1"
	api "k8s.io/api/core/v1"
	core "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"

@@ -108,11 +109,11 @@ func (f *Framework) EnsureService(service *core.Service) *core.Service {
}

// EnsureDeployment creates a Deployment object or returns it if it already exists.
-func (f *Framework) EnsureDeployment(deployment *extensions.Deployment) (*extensions.Deployment, error) {
-	d, err := f.KubeClientSet.Extensions().Deployments(deployment.Namespace).Update(deployment)
+func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
+	d, err := f.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Update(deployment)
	if err != nil {
		if k8sErrors.IsNotFound(err) {
-			return f.KubeClientSet.Extensions().Deployments(deployment.Namespace).Create(deployment)
+			return f.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Create(deployment)
		}
		return nil, err
	}
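A hypothetical call site for the updated EnsureDeployment (not from the PR), reusing the newEchoDeployment sketch shown earlier; if the object does not exist yet, the NotFound branch above creates it instead of updating it.

deployment, err := f.EnsureDeployment(newEchoDeployment(f.Namespace, 2))
Expect(err).NotTo(HaveOccurred(), "ensuring the echo deployment")
Expect(deployment).NotTo(BeNil(), "expected a deployment to be returned")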
@@ -43,7 +43,7 @@ var _ = framework.IngressNginxDescribe("[Serial] Pod Security Policies", func()

	BeforeEach(func() {
		psp := createPodSecurityPolicy()
-		_, err := f.KubeClientSet.Extensions().PodSecurityPolicies().Create(psp)
+		_, err := f.KubeClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
		if !k8sErrors.IsAlreadyExists(err) {
			Expect(err).NotTo(HaveOccurred(), "creating Pod Security Policy")
		}
@@ -87,11 +87,11 @@ var _ = framework.IngressNginxDescribe("Status Update [Status]", func() {
		err = cmd.Process.Kill()
		Expect(err).NotTo(HaveOccurred(), "unexpected error terminating kubectl proxy")

-		ing, err = f.KubeClientSet.Extensions().Ingresses(f.Namespace).Get(host, metav1.GetOptions{})
+		ing, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred(), "unexpected error getting %s/%v Ingress", f.Namespace, host)

		ing.Status.LoadBalancer.Ingress = []apiv1.LoadBalancerIngress{}
-		_, err = f.KubeClientSet.Extensions().Ingresses(f.Namespace).UpdateStatus(ing)
+		_, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).UpdateStatus(ing)
		Expect(err).NotTo(HaveOccurred(), "unexpected error cleaning Ingress status")
		time.Sleep(10 * time.Second)

@@ -110,7 +110,7 @@ var _ = framework.IngressNginxDescribe("Status Update [Status]", func() {
		}()

		err = wait.Poll(10*time.Second, framework.DefaultTimeout, func() (done bool, err error) {
-			ing, err = f.KubeClientSet.Extensions().Ingresses(f.Namespace).Get(host, metav1.GetOptions{})
+			ing, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{})
			if err != nil {
				return false, nil
			}
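The polling above is the standard wait.Poll idiom against the versioned Ingress client. A condensed sketch of the shape it takes (the host and timeout names follow the test above; the completion condition is an assumption, not copied from the PR):

err := wait.Poll(10*time.Second, framework.DefaultTimeout, func() (bool, error) {
	ing, getErr := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{})
	if getErr != nil {
		// Treat transient errors as "not done yet" rather than failing the poll.
		return false, nil
	}
	// Done once the controller has published a load-balancer address again.
	return len(ing.Status.LoadBalancer.Ingress) > 0, nil
})
Expect(err).NotTo(HaveOccurred(), "waiting for the Ingress status to be restored")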
vendor/cloud.google.com/go/CHANGES.md (127 lines changed, generated, vendored)
|
@ -1,8 +1,129 @@
|
|||
# Changes
|
||||
|
||||
## v0.33.1
|
||||
## 0.37.2
|
||||
|
||||
all: release v0.33.1
|
||||
This patch release is mostly intended to bring in v0.3.0 of
|
||||
google.golang.org/api, which fixes a GCF deployment issue.
|
||||
|
||||
Note: we had to-date accidentally marked Redis as stable. In this release, we've
|
||||
fixed it by downgrading its documentation to alpha, as it is in other languages
|
||||
and docs.
|
||||
|
||||
- all:
|
||||
- Document context in generated libraries.
|
||||
|
||||
## 0.37.1
|
||||
|
||||
Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which
|
||||
introduces a new oauth2 url.
|
||||
|
||||
## 0.37.0
|
||||
|
||||
- spanner:
|
||||
- Add BatchDML method.
|
||||
- Reduced initial time between retries.
|
||||
- bigquery:
|
||||
- Produce better error messages for InferSchema.
|
||||
- Add logical type control for avro loads.
|
||||
- Add support for the GEOGRAPHY type.
|
||||
- datastore:
|
||||
- Add sentinel value DetectProjectID for auto-detecting project ID.
|
||||
- Allow flatten tag on struct pointers.
|
||||
- Fixed a bug that caused queries to panic with invalid queries. Instead they
|
||||
will now return an error.
|
||||
- profiler:
|
||||
- Add ability to override GCE zone and instance.
|
||||
- pubsub:
|
||||
- BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more
|
||||
consistently retry specific error codes based on whether they're idempotent
|
||||
or non-idempotent.
|
||||
- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing
|
||||
the Content-Length header to be dropped.
|
||||
- iot:
|
||||
- Add new apiv1 client.
|
||||
- securitycenter:
|
||||
- Add new apiv1 client.
|
||||
- cloudscheduler:
|
||||
- Add new apiv1 client.
|
||||
|
||||
## 0.36.0
|
||||
|
||||
- spanner:
|
||||
- Reduce minimum retry backoff from 1s to 100ms. This makes time between
|
||||
retries much faster and should improve latency.
|
||||
- storage:
|
||||
- Add support for Bucket Policy Only.
|
||||
- kms:
|
||||
- Add ResourceIAM helper method.
|
||||
- Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM.
|
||||
- firestore:
|
||||
- Switch from v1beta1 API to v1 API.
|
||||
- Allow emulator with FIRESTORE_EMULATOR_HOST.
|
||||
- bigquery:
|
||||
- Add NumLongTermBytes to Table.
|
||||
- Add TotalBytesProcessedAccuracy to QueryStatistics.
|
||||
- irm:
|
||||
- Add new v1alpha2 client.
|
||||
- talent:
|
||||
- Add new v4beta1 client.
|
||||
- rpcreplay:
|
||||
- Fix connection to work with grpc >= 1.17.
|
||||
- It is now required for an actual gRPC server to be running for Dial to
|
||||
succeed.
|
||||
|
||||
## 0.35.1
|
||||
|
||||
- spanner:
|
||||
- Adds OpenCensus views back to public API.
|
||||
|
||||
## v0.35.0
|
||||
|
||||
- all:
|
||||
- Add go.mod and go.sum.
|
||||
- Switch usage of gax-go to gax-go/v2.
|
||||
- bigquery:
|
||||
- Fix bug where time partitioning could not be removed from a table.
|
||||
- Fix panic that occurred with empty query parameters.
|
||||
- bttest:
|
||||
- Fix bug where deleted rows were returned by ReadRows.
|
||||
- bigtable/emulator:
|
||||
- Configure max message size to 256 MiB.
|
||||
- firestore:
|
||||
- Allow non-transactional queries in transactions.
|
||||
- Allow StartAt/EndBefore on direct children at any depth.
|
||||
- QuerySnapshotIterator.Stop may be called in an error state.
|
||||
- Fix bug the prevented reset of transaction write state in between retries.
|
||||
- functions/metadata:
|
||||
- Make Metadata.Resource a pointer.
|
||||
- logging:
|
||||
- Make SpanID available in logging.Entry.
|
||||
- metadata:
|
||||
- Wrap !200 error code in a typed err.
|
||||
- profiler:
|
||||
- Add function to check if function name is within a particular file in the
|
||||
profile.
|
||||
- Set parent field in create profile request.
|
||||
- Return kubernetes client to start cluster, so client can be used to poll
|
||||
cluster.
|
||||
- Add function for checking if filename is in profile.
|
||||
- pubsub:
|
||||
- Fix bug where messages expired without an initial modack in
|
||||
synchronous=true mode.
|
||||
- Receive does not retry ResourceExhausted errors.
|
||||
- spanner:
|
||||
- client.Close now cancels existing requests and should be much faster for
|
||||
large amounts of sessions.
|
||||
- Correctly allow MinOpened sessions to be spun up.
|
||||
|
||||
## v0.34.0
|
||||
|
||||
- functions/metadata:
|
||||
- Switch to using JSON in context.
|
||||
- Make Resource a value.
|
||||
- vision: Fix ProductSearch return type.
|
||||
- datastore: Add an example for how to handle MultiError.
|
||||
|
||||
## v0.33.1
|
||||
|
||||
- compute: Removes an erroneously added go.mod.
|
||||
- logging: Populate source location in fromLogEntry.
|
||||
|
@ -179,7 +300,7 @@ hard-to-reproduce Pub/Sub issues.
|
|||
|
||||
## v0.25.0
|
||||
|
||||
- Added [Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CODE_OF_CONDUCT.md)
|
||||
- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md)
|
||||
- bigtable:
|
||||
- cbt: Support a GC policy of "never".
|
||||
- errorreporting:
|
||||
|
|
vendor/cloud.google.com/go/CONTRIBUTING.md (6 lines changed, generated, vendored)
@@ -26,6 +26,10 @@ provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending to the change with `git codereview change` and mail as your
   receive feedback. Each new mailed amendment will create a new patch set for
   your change in Gerrit.
+   - Note: if your change includes a breaking change, our breaking change
+     detector will cause CI/CD to fail. If your breaking change is acceptable
+     in some way, add BREAKING_CHANGE_ACCEPTABLE=<reason> to cause the
+     detector not to be run and to make it clear why that is acceptable.

## Integration Tests

@@ -121,7 +125,7 @@ $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gcloud auth login

# Create the indexes used in the datastore integration tests.
-$ gcloud datastore create-indexes datastore/testdata/index.yaml
+$ gcloud datastore indexes create datastore/testdata/index.yaml

# Creates a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink
vendor/cloud.google.com/go/README.md (64 lines changed, generated, vendored)
|
@ -51,35 +51,35 @@ Changes have been moved to [CHANGES](https://github.com/GoogleCloudPlatform/goog
|
|||
|
||||
Google API | Status | Package
|
||||
---------------------------------------------|--------------|-----------------------------------------------------------
|
||||
[Asset][cloud-asset] | alpha | [`godoc.org/cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
|
||||
[BigQuery][cloud-bigquery] | stable | [`godoc.org/cloud.google.com/go/bigquery`][cloud-bigquery-ref]
|
||||
[Bigtable][cloud-bigtable] | stable | [`godoc.org/cloud.google.com/go/bigtable`][cloud-bigtable-ref]
|
||||
[Cloudtasks][cloud-tasks] | beta | [`godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref]
|
||||
[Container][cloud-container] | stable | [`godoc.org/cloud.google.com/go/container/apiv1`][cloud-container-ref]
|
||||
[ContainerAnalysis][cloud-containeranalysis] | beta | [`godoc.org/cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
|
||||
[Dataproc][cloud-dataproc] | stable | [`godoc.org/cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
|
||||
[Datastore][cloud-datastore] | stable | [`godoc.org/cloud.google.com/go/datastore`][cloud-datastore-ref]
|
||||
[Debugger][cloud-debugger] | alpha | [`godoc.org/cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
|
||||
[Dialogflow][cloud-dialogflow] | alpha | [`godoc.org/cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
|
||||
[Data Loss Prevention][cloud-dlp] | alpha | [`godoc.org/cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
|
||||
[ErrorReporting][cloud-errors] | alpha | [`godoc.org/cloud.google.com/go/errorreporting`][cloud-errors-ref]
|
||||
[Firestore][cloud-firestore] | beta | [`godoc.org/cloud.google.com/go/firestore`][cloud-firestore-ref]
|
||||
[IAM][cloud-iam] | stable | [`godoc.org/cloud.google.com/go/iam`][cloud-iam-ref]
|
||||
[KMS][cloud-kms] | stable | [`godoc.org/cloud.google.com/go/kms`][cloud-kms-ref]
|
||||
[Natural Language][cloud-natural-language] | stable | [`godoc.org/cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
|
||||
[Logging][cloud-logging] | stable | [`godoc.org/cloud.google.com/go/logging`][cloud-logging-ref]
|
||||
[Monitoring][cloud-monitoring] | alpha | [`godoc.org/cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
|
||||
[Asset][cloud-asset] | alpha | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
|
||||
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
|
||||
[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
|
||||
[Cloudtasks][cloud-tasks] | beta | [`cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref]
|
||||
[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
|
||||
[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
|
||||
[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
|
||||
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
|
||||
[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
|
||||
[Dialogflow][cloud-dialogflow] | alpha | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
|
||||
[Data Loss Prevention][cloud-dlp] | alpha | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
|
||||
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
|
||||
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
|
||||
[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`][cloud-iam-ref]
|
||||
[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms`][cloud-kms-ref]
|
||||
[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
|
||||
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
|
||||
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
|
||||
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
|
||||
[Pub/Sub][cloud-pubsub] | stable | [`godoc.org/cloud.google.com/go/pubsub`][cloud-pubsub-ref]
|
||||
[Memorystore][cloud-memorystore] | stable | [`godoc.org/cloud.google.com/go/redis/apiv1beta1`][cloud-memorystore-ref]
|
||||
[Spanner][cloud-spanner] | stable | [`godoc.org/cloud.google.com/go/spanner`][cloud-spanner-ref]
|
||||
[Speech][cloud-speech] | stable | [`godoc.org/cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
|
||||
[Storage][cloud-storage] | stable | [`godoc.org/cloud.google.com/go/storage`][cloud-storage-ref]
|
||||
[Text To Speech][cloud-texttospeech] | alpha | [`godoc.org/cloud.google.com/go/texttospeech/apiv1`][cloud-storage-ref]
|
||||
[Trace][cloud-trace] | alpha | [`godoc.org/cloud.google.com/go/trace/apiv2`][cloud-translation-ref]
|
||||
[Translation][cloud-translation] | stable | [`godoc.org/cloud.google.com/go/translate`][cloud-translation-ref]
|
||||
[Video Intelligence][cloud-video] | alpha | [`godoc.org/cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
|
||||
[Vision][cloud-vision] | stable | [`godoc.org/cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
|
||||
[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
|
||||
[Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`][cloud-memorystore-ref]
|
||||
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
|
||||
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
|
||||
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
|
||||
[Text To Speech][cloud-texttospeech] | alpha | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref]
|
||||
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref]
|
||||
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
|
||||
[Video Intelligence][cloud-video] | alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
|
||||
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
|
||||
|
||||
> **Alpha status**: the API is still being actively developed. As a
|
||||
> result, it might change in backward-incompatible ways and is not recommended
|
||||
|
@@ -102,9 +102,7 @@ for updates on these packages.
## Go Versions Supported

We support the two most recent major versions of Go. If Google App Engine uses
-an older version, we support that as well. You can see which versions are
-currently supported by looking at the lines following `go:` in
-[`.travis.yml`](.travis.yml).
+an older version, we support that as well.

## Authorization

@@ -481,7 +479,7 @@ for more information.

[cloud-memorystore]: https://cloud.google.com/memorystore/
[cloud-memorystore-docs]: https://cloud.google.com/memorystore/docs
-[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1beta1
+[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1

[cloud-texttospeech]: https://cloud.google.com/texttospeech/
[cloud-texttospeech-docs]: https://cloud.google.com/texttospeech/docs

@@ -489,7 +487,7 @@ for more information.

[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-docs]: https://cloud.google.com/trace/docs
-[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv1
+[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2

[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
[cloud-dialogflow-docs]: https://cloud.google.com/dialogflow-enterprise/docs/
vendor/cloud.google.com/go/RELEASING.md (2 lines changed, generated, vendored)
@@ -44,4 +44,4 @@ Install [releasetool](https://github.com/googleapis/releasetool).
   description, copy the changes added to CHANGES.md.


-[releases]: https://github.com/GoogleCloudPlatform/google-cloud-go/releases
+[releases]: https://github.com/googleapis/google-cloud-go/releases
vendor/cloud.google.com/go/cloud.go (2 lines changed, generated, vendored)
@@ -64,7 +64,7 @@ pooled and addressed in a round robin fashion.
Using the Libraries with Docker

Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
-hang, because gRPC retries indefinitely. See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/928
+hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
for more information.

vendor/cloud.google.com/go/compute/metadata/metadata.go (24 lines changed, generated, vendored)
@@ -137,7 +137,7 @@ func testOnGCE() bool {
	resc := make(chan bool, 2)

	// Try two strategies in parallel.
-	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+	// See https://github.com/googleapis/google-cloud-go/issues/194
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)

@@ -300,8 +300,8 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
		// being stable anyway.
		host = metadataIP
	}
-	url := "http://" + host + "/computeMetadata/v1/" + suffix
-	req, _ := http.NewRequest("GET", url, nil)
+	u := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", u, nil)
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := c.hc.Do(req)

@@ -312,13 +312,13 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
-	if res.StatusCode != 200 {
-		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
-	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
+	if res.StatusCode != 200 {
+		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+	}
	return string(all), res.Header.Get("Etag"), nil
}

@@ -499,3 +499,15 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
		}
	}
}

// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code.
	Code int
	// Message is the server response message.
	Message string
}

func (e *Error) Error() string {
	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
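With the change above, the metadata package returns a typed *metadata.Error for non-200 responses instead of a plain formatted error, so callers can branch on the HTTP status. A hypothetical caller, not part of the PR (the error is returned unwrapped, so a plain type assertion is enough):

package main

import (
	"fmt"
	"net/http"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	proj, err := metadata.ProjectID()
	if err != nil {
		if merr, ok := err.(*metadata.Error); ok && merr.Code == http.StatusForbidden {
			fmt.Println("metadata server rejected the request:", merr.Message)
			return
		}
		fmt.Println("not on GCE or request failed:", err)
		return
	}
	fmt.Println("project:", proj)
}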
vendor/cloud.google.com/go/go.mod (25 lines, new file, generated, vendored)
@@ -0,0 +1,25 @@
module cloud.google.com/go

require (
	github.com/golang/mock v1.2.0
	github.com/golang/protobuf v1.2.0
	github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c
	github.com/google/go-cmp v0.2.0
	github.com/google/martian v2.1.0+incompatible
	github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57
	github.com/googleapis/gax-go/v2 v2.0.4
	github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024
	go.opencensus.io v0.19.2
	golang.org/x/build v0.0.0-20190314133821-5284462c4bec
	golang.org/x/exp v0.0.0-20190121172915-509febef88a4
	golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f
	golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421
	golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
	golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
	golang.org/x/tools v0.0.0-20190312170243-e65039ee4138
	google.golang.org/api v0.3.0
	google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19
	google.golang.org/grpc v1.19.0
	honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a
)
vendor/cloud.google.com/go/go.sum (209 lines, new file, generated, vendored)
|
@ -0,0 +1,209 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.19.1 h1:gPYKQ/GAQYR2ksU+qXNmq3CrOZWT1kkryvW6O0v1acY=
|
||||
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
||||
go.opencensus.io v0.19.2 h1:ZZpq6xI6kv/LuE/5s5UQvBU5vMjvRnPb8PvJrIntAnc=
|
||||
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190314133821-5284462c4bec h1:9vRy8wdKITrvvXLEOnNC9FHAGhmzy3OwvKfscMgJ4vo=
|
||||
golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961 h1:GmgasJE571dBGXS7E282h2rIZj+KvCLV8z5I6QXbKNI=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f h1:hX65Cu3JDlGH3uEdK7I99Ii+9kjD6mvnnpfLdEAH0x4=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953 h1:LuZIitY8waaxUfNIdtajyE/YzA/zyf0YxXG27VpLrkg=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 h1:uESlIz09WIHT2I+pasSXcpLYqYK8wHcdCetU3VuMBJE=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497 h1:GXMDsk4xWZCVzkAWCabrabzCCVmfiYSw72f1K/S9QIY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c h1:vamGzbGri8IKo20MQncCuljcQ5uAO6kaCeawQPVblAI=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.2.0 h1:B5VXkdjt7K2Gm6fGBC9C9a1OAKJDT95cTqwet+2zib0=
|
||||
google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU=
|
||||
google.golang.org/api v0.3.0 h1:UIJY20OEo3+tK5MBlcdx37kmdH6EnRjGkW78mc6+EeA=
|
||||
google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a h1:/8zB6iBfHCl1qAnEAWwGPNrUvapuy6CPla1VM0k8hQw=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
2 vendor/cloud.google.com/go/old-news.md (generated, vendored)
@@ -242,7 +242,7 @@ cloud.google.com/go/vision/apiv1 instead.
- trace: several changes to the surface. See the link below.

[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
[Code changes required from v0.9.0.](https://github.com/googleapis/google-cloud-go/blob/master/MIGRATION.md)

_March 17, 2017_
45 vendor/cloud.google.com/go/regen-gapic.sh (generated, vendored)
@@ -28,9 +28,12 @@ google/iam/artman_iam_admin.yaml
google/cloud/asset/artman_cloudasset_v1beta1.yaml
|
||||
google/iam/credentials/artman_iamcredentials_v1.yaml
|
||||
google/cloud/bigquery/datatransfer/artman_bigquerydatatransfer.yaml
|
||||
google/cloud/bigquery/storage/artman_bigquerystorage_v1beta1.yaml
|
||||
google/cloud/dataproc/artman_dataproc_v1.yaml
|
||||
google/cloud/dataproc/artman_dataproc_v1beta2.yaml
|
||||
google/cloud/dialogflow/artman_dialogflow_v2.yaml
|
||||
google/cloud/iot/artman_cloudiot.yaml
|
||||
google/cloud/irm/artman_irm_v1alpha2.yaml
|
||||
google/cloud/kms/artman_cloudkms.yaml
|
||||
google/cloud/language/artman_language_v1.yaml
|
||||
google/cloud/language/artman_language_v1beta2.yaml
|
||||
|
@ -39,9 +42,12 @@ google/cloud/oslogin/artman_oslogin_v1beta.yaml
|
|||
google/cloud/redis/artman_redis_v1beta1.yaml
|
||||
google/cloud/redis/artman_redis_v1.yaml
|
||||
google/cloud/scheduler/artman_cloudscheduler_v1beta1.yaml
|
||||
google/cloud/scheduler/artman_cloudscheduler_v1.yaml
|
||||
google/cloud/securitycenter/artman_securitycenter_v1beta1.yaml
|
||||
google/cloud/securitycenter/artman_securitycenter_v1.yaml
|
||||
google/cloud/speech/artman_speech_v1.yaml
|
||||
google/cloud/speech/artman_speech_v1p1beta1.yaml
|
||||
google/cloud/talent/artman_talent_v4beta1.yaml
|
||||
google/cloud/tasks/artman_cloudtasks_v2beta2.yaml
|
||||
google/cloud/tasks/artman_cloudtasks_v2beta3.yaml
|
||||
google/cloud/texttospeech/artman_texttospeech_v1.yaml
|
||||
|
@ -72,15 +78,32 @@ for api in "${APIS[@]}"; do
|
|||
cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/
|
||||
done
|
||||
|
||||
# NOTE(pongad): `sed -i` doesn't work on Macs, because -i option needs an argument.
|
||||
# `-i ''` doesn't work on GNU, since the empty string is treated as a file name.
|
||||
# So we just create the backup and delete it after.
|
||||
ver=$(date +%Y%m%d)
|
||||
find $GOPATH/src/cloud.google.com/go/ -name '*.go' -exec sed -i.backup -e "s/^const versionClient.*/const versionClient = \"$ver\"/" '{}' +
|
||||
pushd $GOPATH/src/cloud.google.com/go/
|
||||
gofmt -s -d -l -w . && goimports -w .
|
||||
|
||||
# NOTE(pongad): `sed -i` doesn't work on Macs, because -i option needs an argument.
|
||||
# `-i ''` doesn't work on GNU, since the empty string is treated as a file name.
|
||||
# So we just create the backup and delete it after.
|
||||
ver=$(date +%Y%m%d)
|
||||
git ls-files -mo | while read modified; do
|
||||
dir=${modified%/*.*}
|
||||
find . -path "*/$dir/doc.go" -exec sed -i.backup -e "s/^const versionClient.*/const versionClient = \"$ver\"/" '{}' +
|
||||
done
|
||||
popd
|
||||
|
||||
|
||||
HASMANUAL=(
|
||||
errorreporting/apiv1beta1
|
||||
firestore/apiv1beta1
|
||||
firestore/apiv1
|
||||
logging/apiv2
|
||||
longrunning/autogen
|
||||
pubsub/apiv1
|
||||
spanner/apiv1
|
||||
trace/apiv1
|
||||
)
|
||||
for dir in "${HASMANUAL[@]}"; do
|
||||
find "$GOPATH/src/cloud.google.com/go/$dir" -name '*.go' -exec sed -i.backup -e 's/setGoogleClientInfo/SetGoogleClientInfo/g' '{}' '+'
|
||||
done
|
||||
|
||||
find $GOPATH/src/cloud.google.com/go/ -name '*.backup' -delete
|
||||
|
||||
#go list cloud.google.com/go/... | grep apiv | xargs go test
|
||||
|
||||
#go test -short cloud.google.com/go/...
|
||||
|
||||
#echo "googleapis version: $(git rev-parse HEAD)"
|
||||
33 vendor/cloud.google.com/go/tools.go (generated, vendored, new file)
@@ -0,0 +1,33 @@
// +build tools

// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This package exists to cause `go mod` and `go get` to believe these tools
// are dependencies, even though they are not runtime dependencies of any
// package (these are tools used by our CI builds). This means they will appear
// in our `go.mod` file, but will not be a part of the build. Also, since the
// build target is something non-existent, these should not be included in any
// binaries.

package cloud

import (
	_ "github.com/golang/protobuf/protoc-gen-go"
	_ "github.com/jstemmer/go-junit-report"
	_ "golang.org/x/exp/cmd/apidiff"
	_ "golang.org/x/lint/golint"
	_ "golang.org/x/tools/cmd/goimports"
	_ "honnef.co/go/tools/cmd/staticcheck"
)
2 vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml (generated, vendored)
@@ -1,7 +1,7 @@
language: go

go:
- 1.10.x
- 1.11.x

go_import_path: contrib.go.opencensus.io/exporter/ocagent
4 vendor/contrib.go.opencensus.io/exporter/ocagent/README.md (generated, vendored)
@@ -14,7 +14,7 @@ Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core l
## Installation

```bash
$ go get -u contrib.go.opencensus.io/exporter/ocagent/v1
$ go get -u contrib.go.opencensus.io/exporter/ocagent
```

## Usage

@@ -26,7 +26,7 @@ import (
	"log"
	"time"

	"contrib.go.opencensus.io/exporter/ocagent/v1"
	"contrib.go.opencensus.io/exporter/ocagent"
	"go.opencensus.io/trace"
)
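For orientation, here is a minimal, hypothetical consumer of the renamed import path above. It is a sketch, not part of this change: the service name is a placeholder, and only NewExporter, WithInsecure, WithServiceName and Stop from this vendored package are assumed, plus the standard trace.RegisterExporter from go.opencensus.io.

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/ocagent"
	"go.opencensus.io/trace"
)

func main() {
	// NewExporter, WithInsecure and WithServiceName are defined in the
	// vendored ocagent package shown in this diff.
	exp, err := ocagent.NewExporter(
		ocagent.WithInsecure(),
		ocagent.WithServiceName("demo-service"), // placeholder service name
	)
	if err != nil {
		log.Fatalf("failed to create the ocagent exporter: %v", err)
	}
	defer exp.Stop()

	// Register the exporter so OpenCensus spans are forwarded to the agent.
	trace.RegisterExporter(exp)
}
```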
94 vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go (generated, vendored, new file)
@@ -0,0 +1,94 @@
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
sDisconnected int32 = 5 + iota
|
||||
sConnected
|
||||
)
|
||||
|
||||
func (ae *Exporter) setStateDisconnected() {
|
||||
atomic.StoreInt32(&ae.connectionState, sDisconnected)
|
||||
ae.disconnectedCh <- true
|
||||
}
|
||||
|
||||
func (ae *Exporter) setStateConnected() {
|
||||
atomic.StoreInt32(&ae.connectionState, sConnected)
|
||||
}
|
||||
|
||||
func (ae *Exporter) connected() bool {
|
||||
return atomic.LoadInt32(&ae.connectionState) == sConnected
|
||||
}
|
||||
|
||||
const defaultConnReattemptPeriod = 10 * time.Second
|
||||
|
||||
func (ae *Exporter) indefiniteBackgroundConnection() error {
|
||||
defer func() {
|
||||
ae.backgroundConnectionDoneCh <- true
|
||||
}()
|
||||
|
||||
connReattemptPeriod := ae.reconnectionPeriod
|
||||
if connReattemptPeriod <= 0 {
|
||||
connReattemptPeriod = defaultConnReattemptPeriod
|
||||
}
|
||||
|
||||
// No strong seeding required, nano time can
|
||||
// already help with pseudo uniqueness.
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
|
||||
|
||||
// maxJitter: 1 + (70% of the connectionReattemptPeriod)
|
||||
maxJitter := int64(1 + 0.7*float64(connReattemptPeriod))
|
||||
|
||||
for {
|
||||
// Otherwise these will be the normal scenarios to enable
|
||||
// reconnections if we trip out.
|
||||
// 1. If we've stopped, return entirely
|
||||
// 2. Otherwise block until we are disconnected, and
|
||||
// then retry connecting
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return errStopped
|
||||
|
||||
case <-ae.disconnectedCh:
|
||||
// Normal scenario that we'll wait for
|
||||
}
|
||||
|
||||
if err := ae.connect(); err == nil {
|
||||
ae.setStateConnected()
|
||||
} else {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
|
||||
// Apply some jitter to avoid lockstep retrials of other
|
||||
// agent-exporters. Lockstep retrials could result in an
|
||||
// innocent DDOS, by clogging the machine's resources and network.
|
||||
jitter := time.Duration(rng.Int63n(maxJitter))
|
||||
<-time.After(connReattemptPeriod + jitter)
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) connect() error {
|
||||
cc, err := ae.dialToAgent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ae.enableConnectionStreams(cc)
|
||||
}
|
10 vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod (generated, vendored)
@@ -1,9 +1,9 @@
module contrib.go.opencensus.io/exporter/ocagent

require (
	github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a
	github.com/golang/protobuf v1.2.0
	go.opencensus.io v0.17.0
	google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf
	google.golang.org/grpc v1.15.0
	github.com/census-instrumentation/opencensus-proto v0.2.0 // this is to match the version used in census-instrumentation/opencensus-service
	github.com/golang/protobuf v1.3.1
	go.opencensus.io v0.19.1
	google.golang.org/api v0.2.0
	google.golang.org/grpc v1.19.1
)
112 vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum (generated, vendored)
@@ -1,44 +1,114 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/census-instrumentation/opencensus-proto v0.0.1 h1:4v5I+ax5jCmwTYVaWQacX8ZSxvUZemBX4UwBGSkDeoA=
|
||||
github.com/census-instrumentation/opencensus-proto v0.0.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a h1:t88pXOTS5K+pjfuhTOcul6sdC4khgqB8ukyfbe62Zxo=
|
||||
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.6.2 h1:8KyC64BiO8ndiGHY5DlFWWdangUPC9QHPakFRre/Ud0=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
go.opencensus.io v0.17.0 h1:2Cu88MYg+1LU+WVD+NWwYhyP0kKgRlN9QjWGaX0jKTE=
|
||||
go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
go.opencensus.io v0.19.1 h1:gPYKQ/GAQYR2ksU+qXNmq3CrOZWT1kkryvW6O0v1acY=
|
||||
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.2.0 h1:B5VXkdjt7K2Gm6fGBC9C9a1OAKJDT95cTqwet+2zib0=
|
||||
google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.15.0 h1:Az/KuahOM4NAidTEuJCv/RonAA7rYsTPkqXVjr+8OOw=
|
||||
google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
|
||||
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
7 vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go (generated, vendored)
@@ -21,7 +21,12 @@ import (
"go.opencensus.io"
|
||||
)
|
||||
|
||||
func createNodeInfo(nodeName string) *commonpb.Node {
|
||||
// NodeWithStartTime creates a node using nodeName and derives:
|
||||
// Hostname from the environment
|
||||
// Pid from the current process
|
||||
// StartTimestamp from the start time of this process
|
||||
// Language and library information.
|
||||
func NodeWithStartTime(nodeName string) *commonpb.Node {
|
||||
return &commonpb.Node{
|
||||
Identifier: &commonpb.ProcessIdentifier{
|
||||
HostName: os.Getenv("HOSTNAME"),
|
||||
|
|
375 vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go (generated, vendored)
@@ -23,11 +23,18 @@ import (
|
||||
"google.golang.org/api/support/bundler"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"go.opencensus.io/resource"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
agentcommonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||
agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
|
||||
agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
|
||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
||||
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
||||
)
|
||||
|
||||
|
@ -41,22 +48,43 @@ func init() {
|
|||
}
|
||||
|
||||
var _ trace.Exporter = (*Exporter)(nil)
|
||||
var _ view.Exporter = (*Exporter)(nil)
|
||||
|
||||
type Exporter struct {
|
||||
connectionState int32
|
||||
|
||||
// mu protects the non-atomic and non-channel variables
|
||||
mu sync.RWMutex
|
||||
started bool
|
||||
stopped bool
|
||||
agentPort uint16
|
||||
agentAddress string
|
||||
serviceName string
|
||||
canDialInsecure bool
|
||||
traceSvcClient agenttracepb.TraceServiceClient
|
||||
traceExporter agenttracepb.TraceService_ExportClient
|
||||
nodeInfo *agentcommonpb.Node
|
||||
grpcClientConn *grpc.ClientConn
|
||||
mu sync.RWMutex
|
||||
// senderMu protects the concurrent unsafe traceExporter client
|
||||
senderMu sync.RWMutex
|
||||
started bool
|
||||
stopped bool
|
||||
agentAddress string
|
||||
serviceName string
|
||||
canDialInsecure bool
|
||||
traceExporter agenttracepb.TraceService_ExportClient
|
||||
metricsExporter agentmetricspb.MetricsService_ExportClient
|
||||
nodeInfo *commonpb.Node
|
||||
grpcClientConn *grpc.ClientConn
|
||||
reconnectionPeriod time.Duration
|
||||
resource *resourcepb.Resource
|
||||
compressor string
|
||||
headers map[string]string
|
||||
|
||||
startOnce sync.Once
|
||||
stopCh chan bool
|
||||
disconnectedCh chan bool
|
||||
|
||||
backgroundConnectionDoneCh chan bool
|
||||
|
||||
traceBundler *bundler.Bundler
|
||||
|
||||
// viewDataBundler is the bundler to enable conversion
|
||||
// from OpenCensus-Go view.Data to metricspb.Metric.
|
||||
// Please do not confuse it with metricsBundler!
|
||||
viewDataBundler *bundler.Bundler
|
||||
|
||||
clientTransportCredentials credentials.TransportCredentials
|
||||
}
|
||||
|
||||
func NewExporter(opts ...ExporterOption) (*Exporter, error) {
|
||||
|
@ -77,16 +105,22 @@ func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) {
|
|||
for _, opt := range opts {
|
||||
opt.withExporter(e)
|
||||
}
|
||||
if e.agentPort <= 0 {
|
||||
e.agentPort = DefaultAgentPort
|
||||
}
|
||||
traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
|
||||
e.uploadTraces(bundle.([]*trace.SpanData))
|
||||
})
|
||||
traceBundler.DelayThreshold = 2 * time.Second
|
||||
traceBundler.BundleCountThreshold = spanDataBufferSize
|
||||
e.traceBundler = traceBundler
|
||||
e.nodeInfo = createNodeInfo(e.serviceName)
|
||||
|
||||
viewDataBundler := bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
|
||||
e.uploadViewData(bundle.([]*view.Data))
|
||||
})
|
||||
viewDataBundler.DelayThreshold = 2 * time.Second
|
||||
viewDataBundler.BundleCountThreshold = 500 // TODO: (@odeke-em) make this configurable.
|
||||
e.viewDataBundler = viewDataBundler
|
||||
e.nodeInfo = NodeWithStartTime(e.serviceName)
|
||||
e.resource = resourceProtoFromEnv()
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
|
@ -95,26 +129,34 @@ const (
|
|||
maxInitialTracesRetries = 10
|
||||
)
|
||||
|
||||
var (
|
||||
errAlreadyStarted = errors.New("already started")
|
||||
errNotStarted = errors.New("not started")
|
||||
errStopped = errors.New("stopped")
|
||||
errNoConnection = errors.New("no active connection")
|
||||
)
|
||||
|
||||
// Start dials to the agent, establishing a connection to it. It also
|
||||
// initiates the Config and Trace services by sending over the initial
|
||||
// messages that consist of the node identifier. Start performs a best case
|
||||
// attempt to try to send the initial messages, by applying exponential
|
||||
// backoff at most 10 times.
|
||||
// messages that consist of the node identifier. Start invokes a background
|
||||
// connector that will reattempt connections to the agent periodically
|
||||
// if the connection dies.
|
||||
func (ae *Exporter) Start() error {
|
||||
ae.mu.Lock()
|
||||
defer ae.mu.Unlock()
|
||||
var err = errAlreadyStarted
|
||||
ae.startOnce.Do(func() {
|
||||
ae.mu.Lock()
|
||||
defer ae.mu.Unlock()
|
||||
|
||||
err := ae.doStartLocked()
|
||||
if err == nil {
|
||||
ae.started = true
|
||||
return nil
|
||||
}
|
||||
ae.disconnectedCh = make(chan bool, 1)
|
||||
ae.stopCh = make(chan bool)
|
||||
ae.backgroundConnectionDoneCh = make(chan bool)
|
||||
|
||||
// Otherwise we have an error and should clean up to avoid leaking resources.
|
||||
ae.started = false
|
||||
if ae.grpcClientConn != nil {
|
||||
ae.grpcClientConn.Close()
|
||||
}
|
||||
ae.setStateDisconnected()
|
||||
go ae.indefiniteBackgroundConnection()
|
||||
|
||||
err = nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -123,51 +165,65 @@ func (ae *Exporter) prepareAgentAddress() string {
|
|||
if ae.agentAddress != "" {
|
||||
return ae.agentAddress
|
||||
}
|
||||
port := DefaultAgentPort
|
||||
if ae.agentPort > 0 {
|
||||
port = ae.agentPort
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", DefaultAgentHost, port)
|
||||
return fmt.Sprintf("%s:%d", DefaultAgentHost, DefaultAgentPort)
|
||||
}
|
||||
|
||||
func (ae *Exporter) doStartLocked() error {
|
||||
if ae.started {
|
||||
return nil
|
||||
func (ae *Exporter) enableConnectionStreams(cc *grpc.ClientConn) error {
|
||||
ae.mu.RLock()
|
||||
started := ae.started
|
||||
nodeInfo := ae.nodeInfo
|
||||
ae.mu.RUnlock()
|
||||
|
||||
if !started {
|
||||
return errNotStarted
|
||||
}
|
||||
|
||||
// Now start it
|
||||
cc, err := ae.dialToAgent()
|
||||
if err != nil {
|
||||
return err
|
||||
ae.mu.Lock()
|
||||
// If the previous clientConn was non-nil, close it
|
||||
if ae.grpcClientConn != nil {
|
||||
_ = ae.grpcClientConn.Close()
|
||||
}
|
||||
ae.grpcClientConn = cc
|
||||
ae.mu.Unlock()
|
||||
|
||||
if err := ae.createTraceServiceConnection(ae.grpcClientConn, nodeInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ae.createMetricsServiceConnection(ae.grpcClientConn, nodeInfo)
|
||||
}
|
||||
|
||||
func (ae *Exporter) createTraceServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||
// Initiate the trace service by sending over node identifier info.
|
||||
traceSvcClient := agenttracepb.NewTraceServiceClient(cc)
|
||||
traceExporter, err := traceSvcClient.Export(context.Background())
|
||||
ctx := context.Background()
|
||||
if len(ae.headers) > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||
}
|
||||
traceExporter, err := traceSvcClient.Export(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
|
||||
}
|
||||
|
||||
firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{Node: ae.nodeInfo}
|
||||
err = nTriesWithExponentialBackoff(maxInitialTracesRetries, 200*time.Microsecond, func() error {
|
||||
return traceExporter.Send(firstTraceMessage)
|
||||
})
|
||||
if err != nil {
|
||||
firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
|
||||
Node: node,
|
||||
Resource: ae.resource,
|
||||
}
|
||||
if err := traceExporter.Send(firstTraceMessage); err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||
}
|
||||
|
||||
ae.mu.Lock()
|
||||
ae.traceExporter = traceExporter
|
||||
ae.mu.Unlock()
|
||||
|
||||
// Initiate the config service by sending over node identifier info.
|
||||
configStream, err := traceSvcClient.Config(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
|
||||
}
|
||||
firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: ae.nodeInfo}
|
||||
err = nTriesWithExponentialBackoff(maxInitialConfigRetries, 200*time.Microsecond, func() error {
|
||||
return configStream.Send(firstCfgMessage)
|
||||
})
|
||||
if err != nil {
|
||||
firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
|
||||
if err := configStream.Send(firstCfgMessage); err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||
}
|
||||
|
||||
|
@ -178,32 +234,51 @@ func (ae *Exporter) doStartLocked() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// dialToAgent performs a best case attempt to dial to the agent.
|
||||
// It retries failed dials with:
|
||||
// * gRPC dialTimeout of 1s
|
||||
// * exponential backoff, 5 times with a period of 50ms
|
||||
// hence in the worst case of (no agent actually available), it
|
||||
// will take at least:
|
||||
// (5 * 1s) + ((1<<5)-1) * 0.01 s = 5s + 1.55s = 6.55s
|
||||
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
|
||||
addr := ae.prepareAgentAddress()
|
||||
dialOpts := []grpc.DialOption{grpc.WithBlock()}
|
||||
if ae.canDialInsecure {
|
||||
dialOpts = append(dialOpts, grpc.WithInsecure())
|
||||
func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||
metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
|
||||
metricsExporter, err := metricsSvcClient.Export(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
|
||||
}
|
||||
// Initiate the metrics service by sending over the first message just containing the Node and Resource.
|
||||
firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
|
||||
Node: node,
|
||||
Resource: ae.resource,
|
||||
}
|
||||
if err := metricsExporter.Send(firstMetricsMessage); err != nil {
|
||||
return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
|
||||
}
|
||||
|
||||
var cc *grpc.ClientConn
|
||||
dialOpts = append(dialOpts, grpc.WithTimeout(1*time.Second))
|
||||
dialBackoffWaitPeriod := 50 * time.Millisecond
|
||||
err := nTriesWithExponentialBackoff(5, dialBackoffWaitPeriod, func() error {
|
||||
var err error
|
||||
cc, err = grpc.Dial(addr, dialOpts...)
|
||||
return err
|
||||
})
|
||||
return cc, err
|
||||
ae.mu.Lock()
|
||||
ae.metricsExporter = metricsExporter
|
||||
ae.mu.Unlock()
|
||||
|
||||
// With that we are good to go and can start sending metrics
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
|
||||
addr := ae.prepareAgentAddress()
|
||||
var dialOpts []grpc.DialOption
|
||||
if ae.clientTransportCredentials != nil {
|
||||
dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
|
||||
} else if ae.canDialInsecure {
|
||||
dialOpts = append(dialOpts, grpc.WithInsecure())
|
||||
}
|
||||
if ae.compressor != "" {
|
||||
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if len(ae.headers) > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, dialOpts...)
|
||||
}
|
||||
|
||||
func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
|
||||
// Note: We haven't yet implemented configuration sending so we
|
||||
// should NOT be changing connection states within this function for now.
|
||||
for {
|
||||
recv, err := configStream.Recv()
|
||||
if err != nil {
|
||||
|
@ -219,7 +294,7 @@ func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService
|
|||
if psamp := cfg.GetProbabilitySampler(); psamp != nil {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
|
||||
} else if csamp := cfg.GetConstantSampler(); csamp != nil {
|
||||
alwaysSample := csamp.Decision == true
|
||||
alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
|
||||
if alwaysSample {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||
} else {
|
||||
|
@ -236,20 +311,19 @@ func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService
|
|||
}
|
||||
}
|
||||
|
||||
var (
|
||||
errNotStarted = errors.New("not started")
|
||||
)
|
||||
|
||||
// Stop shuts down all the connections and resources
|
||||
// related to the exporter.
|
||||
func (ae *Exporter) Stop() error {
|
||||
ae.mu.Lock()
|
||||
defer ae.mu.Unlock()
|
||||
ae.mu.RLock()
|
||||
cc := ae.grpcClientConn
|
||||
started := ae.started
|
||||
stopped := ae.stopped
|
||||
ae.mu.RUnlock()
|
||||
|
||||
if !ae.started {
|
||||
if !started {
|
||||
return errNotStarted
|
||||
}
|
||||
if ae.stopped {
|
||||
if stopped {
|
||||
// TODO: tell the user that we've already stopped, so perhaps a sentinel error?
|
||||
return nil
|
||||
}
|
||||
|
@ -258,13 +332,19 @@ func (ae *Exporter) Stop() error {
|
|||
|
||||
// Now close the underlying gRPC connection.
|
||||
var err error
|
||||
if ae.grpcClientConn != nil {
|
||||
err = ae.grpcClientConn.Close()
|
||||
if cc != nil {
|
||||
err = cc.Close()
|
||||
}
|
||||
|
||||
// At this point we can change the state variables: started and stopped
|
||||
ae.mu.Lock()
|
||||
ae.started = false
|
||||
ae.stopped = true
|
||||
ae.mu.Unlock()
|
||||
close(ae.stopCh)
|
||||
|
||||
// Ensure that the backgroundConnector returns
|
||||
<-ae.backgroundConnectionDoneCh
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -273,27 +353,142 @@ func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
|
|||
if sd == nil {
|
||||
return
|
||||
}
|
||||
_ = ae.traceBundler.Add(sd, -1)
|
||||
_ = ae.traceBundler.Add(sd, 1)
|
||||
}
|
||||
|
||||
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
|
||||
if len(sdl) == 0 {
|
||||
func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
|
||||
if batch == nil || len(batch.Spans) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return errStopped
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return errNoConnection
|
||||
}
|
||||
|
||||
ae.senderMu.Lock()
|
||||
err := ae.traceExporter.Send(batch)
|
||||
ae.senderMu.Unlock()
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) ExportView(vd *view.Data) {
|
||||
if vd == nil {
|
||||
return
|
||||
}
|
||||
_ = ae.viewDataBundler.Add(vd, 1)
|
||||
}
|
||||
|
||||
func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
|
||||
if len(sdl) == 0 {
|
||||
return nil
|
||||
}
|
||||
protoSpans := make([]*tracepb.Span, 0, len(sdl))
|
||||
for _, sd := range sdl {
|
||||
if sd != nil {
|
||||
protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
|
||||
}
|
||||
}
|
||||
return protoSpans
|
||||
}
|
||||
|
||||
if len(protoSpans) > 0 {
|
||||
_ = ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
|
||||
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return
|
||||
}
|
||||
|
||||
protoSpans := ocSpanDataToPbSpans(sdl)
|
||||
if len(protoSpans) == 0 {
|
||||
return
|
||||
}
|
||||
ae.senderMu.Lock()
|
||||
err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
|
||||
Spans: protoSpans,
|
||||
})
|
||||
ae.senderMu.Unlock()
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
|
||||
if len(vdl) == 0 {
|
||||
return nil
|
||||
}
|
||||
metrics := make([]*metricspb.Metric, 0, len(vdl))
|
||||
for _, vd := range vdl {
|
||||
if vd != nil {
|
||||
vmetric, err := viewDataToMetric(vd)
|
||||
// TODO: (@odeke-em) somehow report this error, if it is non-nil.
|
||||
if err == nil && vmetric != nil {
|
||||
metrics = append(metrics, vmetric)
|
||||
}
|
||||
}
|
||||
}
|
||||
return metrics
|
||||
}
|
||||
|
||||
func (ae *Exporter) uploadViewData(vdl []*view.Data) {
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return
|
||||
}
|
||||
|
||||
protoMetrics := ocViewDataToPbMetrics(vdl)
|
||||
if len(protoMetrics) == 0 {
|
||||
return
|
||||
}
|
||||
err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
|
||||
Metrics: protoMetrics,
|
||||
// TODO:(@odeke-em)
|
||||
// a) Figure out how to derive a Node from the environment
|
||||
// b) Figure out how to derive a Resource from the environment
|
||||
// or better letting users of the exporter configure it.
|
||||
})
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) Flush() {
|
||||
ae.traceBundler.Flush()
|
||||
ae.viewDataBundler.Flush()
|
||||
}
|
||||
|
||||
func resourceProtoFromEnv() *resourcepb.Resource {
|
||||
rs, _ := resource.FromEnv(context.Background())
|
||||
if rs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
rprs := &resourcepb.Resource{
|
||||
Type: rs.Type,
|
||||
}
|
||||
if rs.Labels != nil {
|
||||
rprs.Labels = make(map[string]string)
|
||||
for k, v := range rs.Labels {
|
||||
rprs.Labels[k] = v
|
||||
}
|
||||
}
|
||||
return rprs
|
||||
}
|
||||
|
|
76 vendor/contrib.go.opencensus.io/exporter/ocagent/options.go (generated, vendored)
@@ -14,6 +14,12 @@
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultAgentPort uint16 = 55678
|
||||
DefaultAgentHost string = "localhost"
|
||||
|
@ -23,14 +29,6 @@ type ExporterOption interface {
|
|||
withExporter(e *Exporter)
|
||||
}
|
||||
|
||||
type portSetter uint16
|
||||
|
||||
func (ps portSetter) withExporter(e *Exporter) {
|
||||
e.agentPort = uint16(ps)
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*portSetter)(nil)
|
||||
|
||||
type insecureGrpcConnection int
|
||||
|
||||
var _ ExporterOption = (*insecureGrpcConnection)(nil)
|
||||
|
@ -44,12 +42,6 @@ func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
|
|||
// does. Note, by default, client security is required unless WithInsecure is used.
|
||||
func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
|
||||
|
||||
// WithPort allows one to override the port that the exporter will
|
||||
// connect to the agent on, instead of using DefaultAgentPort.
|
||||
func WithPort(port uint16) ExporterOption {
|
||||
return portSetter(port)
|
||||
}
|
||||
|
||||
type addressSetter string
|
||||
|
||||
func (as addressSetter) withExporter(e *Exporter) {
|
||||
|
@ -78,3 +70,59 @@ var _ ExporterOption = (*serviceNameSetter)(nil)
|
|||
func WithServiceName(serviceName string) ExporterOption {
|
||||
return serviceNameSetter(serviceName)
|
||||
}
|
||||
|
||||
type reconnectionPeriod time.Duration
|
||||
|
||||
func (rp reconnectionPeriod) withExporter(e *Exporter) {
|
||||
e.reconnectionPeriod = time.Duration(rp)
|
||||
}
|
||||
|
||||
func WithReconnectionPeriod(rp time.Duration) ExporterOption {
|
||||
return reconnectionPeriod(rp)
|
||||
}
|
||||
|
||||
type compressorSetter string
|
||||
|
||||
func (c compressorSetter) withExporter(e *Exporter) {
|
||||
e.compressor = string(c)
|
||||
}
|
||||
|
||||
// UseCompressor will set the compressor for the gRPC client to use when sending requests.
|
||||
// It is the responsibility of the caller to ensure that the compressor set has been registered
|
||||
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
|
||||
// compressors auto-register on import, such as gzip, which can be registered by calling
|
||||
// `import _ "google.golang.org/grpc/encoding/gzip"`
|
||||
func UseCompressor(compressorName string) ExporterOption {
|
||||
return compressorSetter(compressorName)
|
||||
}
|
||||
|
||||
type headerSetter map[string]string
|
||||
|
||||
func (h headerSetter) withExporter(e *Exporter) {
|
||||
e.headers = map[string]string(h)
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers when the gRPC stream connection
|
||||
// is instantiated
|
||||
func WithHeaders(headers map[string]string) ExporterOption {
|
||||
return headerSetter(headers)
|
||||
}
|
||||
|
||||
type clientCredentials struct {
|
||||
credentials.TransportCredentials
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*clientCredentials)(nil)
|
||||
|
||||
// WithTLSCredentials allows the connection to use TLS credentials
|
||||
// when talking to the server. It takes in grpc.TransportCredentials instead
|
||||
// of say a Certificate file or a tls.Certificate, because the retrieving
|
||||
// these credentials can be done in many ways e.g. plain file, in code tls.Config
|
||||
// or by certificate rotation, so it is up to the caller to decide what to use.
|
||||
func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
|
||||
return &clientCredentials{TransportCredentials: creds}
|
||||
}
|
||||
|
||||
func (cc *clientCredentials) withExporter(e *Exporter) {
|
||||
e.clientTransportCredentials = cc.TransportCredentials
|
||||
}
|
||||
|
|
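To make the new exporter options concrete, here is a small sketch that combines them. It is a hypothetical configuration, not part of this change: the reconnection period, compressor name and header values are placeholders, and only options whose constructors appear in the diff above (WithInsecure, WithReconnectionPeriod, UseCompressor, WithHeaders) are used.

```go
package main

import (
	"log"
	"time"

	"contrib.go.opencensus.io/exporter/ocagent"
	// gzip registers itself with google.golang.org/grpc/encoding on import,
	// as the UseCompressor documentation above requires.
	_ "google.golang.org/grpc/encoding/gzip"
)

func main() {
	exp, err := ocagent.NewExporter(
		ocagent.WithInsecure(),
		ocagent.WithReconnectionPeriod(5*time.Second),          // placeholder period
		ocagent.UseCompressor("gzip"),                          // registered above
		ocagent.WithHeaders(map[string]string{"x-tenant": "example"}), // placeholder header
	)
	if err != nil {
		log.Fatalf("failed to create the ocagent exporter: %v", err)
	}
	defer exp.Stop()
}
```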
86 vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go (generated, vendored)
@@ -15,6 +15,7 @@
package ocagent
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/trace"
|
||||
|
@ -24,6 +25,11 @@ import (
|
|||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
const (
|
||||
maxAnnotationEventsPerSpan = 32
|
||||
maxMessageEventsPerSpan = 128
|
||||
)
|
||||
|
||||
func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
|
||||
if sd == nil {
|
||||
return nil
|
||||
|
@ -43,6 +49,7 @@ func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
|
|||
Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
|
||||
Name: namePtr,
|
||||
Attributes: ocAttributesToProtoAttributes(sd.Attributes),
|
||||
TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
|
||||
Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
|
||||
}
|
||||
}
|
||||
|
@ -122,6 +129,85 @@ func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_A
|
|||
}
|
||||
}
|
||||
|
||||
// This code is mostly copied from
|
||||
// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
|
||||
func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
|
||||
if len(as) == 0 && len(es) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
timeEvents := &tracepb.Span_TimeEvents{}
|
||||
var annotations, droppedAnnotationsCount int
|
||||
var messageEvents, droppedMessageEventsCount int
|
||||
|
||||
// Transform annotations
|
||||
for i, a := range as {
|
||||
if annotations >= maxAnnotationEventsPerSpan {
|
||||
droppedAnnotationsCount = len(as) - i
|
||||
break
|
||||
}
|
||||
annotations++
|
||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||
&tracepb.Span_TimeEvent{
|
||||
Time: timeToTimestamp(a.Time),
|
||||
Value: transformAnnotationToTimeEvent(&a),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Transform message events
|
||||
for i, e := range es {
|
||||
if messageEvents >= maxMessageEventsPerSpan {
|
||||
droppedMessageEventsCount = len(es) - i
|
||||
break
|
||||
}
|
||||
messageEvents++
|
||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||
&tracepb.Span_TimeEvent{
|
||||
Time: timeToTimestamp(e.Time),
|
||||
Value: transformMessageEventToTimeEvent(&e),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Process dropped counter
|
||||
timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
|
||||
timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
|
||||
|
||||
return timeEvents
|
||||
}
|
||||
|
||||
func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
|
||||
return &tracepb.Span_TimeEvent_Annotation_{
|
||||
Annotation: &tracepb.Span_TimeEvent_Annotation{
|
||||
Description: &tracepb.TruncatableString{Value: a.Message},
|
||||
Attributes: ocAttributesToProtoAttributes(a.Attributes),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
|
||||
return &tracepb.Span_TimeEvent_MessageEvent_{
|
||||
MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
|
||||
Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
|
||||
Id: uint64(e.MessageID),
|
||||
UncompressedSize: uint64(e.UncompressedByteSize),
|
||||
CompressedSize: uint64(e.CompressedByteSize),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// clip32 clips an int to the range of an int32.
|
||||
func clip32(x int) int32 {
|
||||
if x < math.MinInt32 {
|
||||
return math.MinInt32
|
||||
}
|
||||
if x > math.MaxInt32 {
|
||||
return math.MaxInt32
|
||||
}
|
||||
return int32(x)
|
||||
}
|
||||
|
||||
func timeToTimestamp(t time.Time) *timestamp.Timestamp {
|
||||
nanoTime := t.UnixNano()
|
||||
return &timestamp.Timestamp{
|
||||
|
|
274 vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go (generated, vendored, new file)
@@ -0,0 +1,274 @@
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
|
||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errNilMeasure = errors.New("expecting a non-nil stats.Measure")
|
||||
errNilView = errors.New("expecting a non-nil view.View")
|
||||
errNilViewData = errors.New("expecting a non-nil view.Data")
|
||||
)
|
||||
|
||||
func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
|
||||
if vd == nil {
|
||||
return nil, errNilViewData
|
||||
}
|
||||
|
||||
descriptor, err := viewToMetricDescriptor(vd.View)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
timeseries, err := viewDataToTimeseries(vd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metric := &metricspb.Metric{
|
||||
MetricDescriptor: descriptor,
|
||||
Timeseries: timeseries,
|
||||
}
|
||||
return metric, nil
|
||||
}
|
||||
|
||||
func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
|
||||
if v == nil {
|
||||
return nil, errNilView
|
||||
}
|
||||
if v.Measure == nil {
|
||||
return nil, errNilMeasure
|
||||
}
|
||||
|
||||
desc := &metricspb.MetricDescriptor{
|
||||
Name: stringOrCall(v.Name, v.Measure.Name),
|
||||
Description: stringOrCall(v.Description, v.Measure.Description),
|
||||
Unit: v.Measure.Unit(),
|
||||
Type: aggregationToMetricDescriptorType(v),
|
||||
LabelKeys: tagKeysToLabelKeys(v.TagKeys),
|
||||
}
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
func stringOrCall(first string, call func() string) string {
|
||||
if first != "" {
|
||||
return first
|
||||
}
|
||||
return call()
|
||||
}
|
||||
|
||||
type measureType uint
|
||||
|
||||
const (
|
||||
measureUnknown measureType = iota
|
||||
measureInt64
|
||||
measureFloat64
|
||||
)
|
||||
|
||||
func measureTypeFromMeasure(m stats.Measure) measureType {
|
||||
switch m.(type) {
|
||||
default:
|
||||
return measureUnknown
|
||||
case *stats.Float64Measure:
|
||||
return measureFloat64
|
||||
case *stats.Int64Measure:
|
||||
return measureInt64
|
||||
}
|
||||
}
|
||||
|
||||
func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
|
||||
if v == nil || v.Aggregation == nil {
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
if v.Measure == nil {
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
|
||||
switch v.Aggregation.Type {
|
||||
case view.AggTypeCount:
|
||||
// Cumulative on int64
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||
|
||||
case view.AggTypeDistribution:
|
||||
// Cumulative types
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
|
||||
|
||||
case view.AggTypeLastValue:
|
||||
// Gauge types
|
||||
switch measureTypeFromMeasure(v.Measure) {
|
||||
case measureFloat64:
|
||||
return metricspb.MetricDescriptor_GAUGE_DOUBLE
|
||||
case measureInt64:
|
||||
return metricspb.MetricDescriptor_GAUGE_INT64
|
||||
}
|
||||
|
||||
case view.AggTypeSum:
|
||||
// Cumulative types
|
||||
switch measureTypeFromMeasure(v.Measure) {
|
||||
case measureFloat64:
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
|
||||
case measureInt64:
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||
}
|
||||
}
|
||||
|
||||
// For all other cases, return unspecified.
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
|
||||
func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
|
||||
labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
|
||||
for _, tagKey := range tagKeys {
|
||||
labelKeys = append(labelKeys, &metricspb.LabelKey{
|
||||
Key: tagKey.Name(),
|
||||
})
|
||||
}
|
||||
return labelKeys
|
||||
}
|
||||
|
||||
func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
|
||||
if vd == nil || len(vd.Rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Given that view.Data only contains Start, End
|
||||
// the timestamps for all the row data will be the exact same
|
||||
// per aggregation. However, the values will differ.
|
||||
// Each row has its own tags.
|
||||
startTimestamp := timeToProtoTimestamp(vd.Start)
|
||||
endTimestamp := timeToProtoTimestamp(vd.End)
|
||||
|
||||
mType := measureTypeFromMeasure(vd.View.Measure)
|
||||
timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
|
||||
// It is imperative that the ordering of "LabelValues" matches those
|
||||
// of the Label keys in the metric descriptor.
|
||||
for _, row := range vd.Rows {
|
||||
labelValues := labelValuesFromTags(row.Tags)
|
||||
point := rowToPoint(vd.View, row, endTimestamp, mType)
|
||||
timeseries = append(timeseries, &metricspb.TimeSeries{
|
||||
StartTimestamp: startTimestamp,
|
||||
LabelValues: labelValues,
|
||||
Points: []*metricspb.Point{point},
|
||||
})
|
||||
}
|
||||
|
||||
if len(timeseries) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return timeseries, nil
|
||||
}
|
||||
|
||||
func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
|
||||
unixNano := t.UnixNano()
|
||||
return &timestamp.Timestamp{
|
||||
Seconds: int64(unixNano / 1e9),
|
||||
Nanos: int32(unixNano % 1e9),
|
||||
}
|
||||
}
|
||||
|
||||
func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
|
||||
pt := &metricspb.Point{
|
||||
Timestamp: endTimestamp,
|
||||
}
|
||||
|
||||
switch data := row.Data.(type) {
|
||||
case *view.CountData:
|
||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
|
||||
|
||||
case *view.DistributionData:
|
||||
pt.Value = &metricspb.Point_DistributionValue{
|
||||
DistributionValue: &metricspb.DistributionValue{
|
||||
Count: data.Count,
|
||||
Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
|
||||
// TODO: Add Exemplar
|
||||
Buckets: bucketsToProtoBuckets(data.CountPerBucket),
|
||||
BucketOptions: &metricspb.DistributionValue_BucketOptions{
|
||||
Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
|
||||
Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
|
||||
Bounds: v.Aggregation.Buckets,
|
||||
},
|
||||
},
|
||||
},
|
||||
SumOfSquaredDeviation: data.SumOfSquaredDev,
|
||||
}}
|
||||
|
||||
case *view.LastValueData:
|
||||
setPointValue(pt, data.Value, mType)
|
||||
|
||||
case *view.SumData:
|
||||
setPointValue(pt, data.Value, mType)
|
||||
}
|
||||
|
||||
return pt
|
||||
}
|
||||
|
||||
// Not returning anything from this function because metricspb.Point.is_Value is an unexported
|
||||
// interface hence we just have to set its value by pointer.
|
||||
func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
|
||||
if mType == measureInt64 {
|
||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
|
||||
} else {
|
||||
pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
|
||||
}
|
||||
}
|
||||
|
||||
func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
|
||||
distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
|
||||
for i := 0; i < len(countPerBucket); i++ {
|
||||
count := countPerBucket[i]
|
||||
|
||||
distBuckets[i] = &metricspb.DistributionValue_Bucket{
|
||||
Count: count,
|
||||
}
|
||||
}
|
||||
|
||||
return distBuckets
|
||||
}
|
||||
|
||||
func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
|
||||
if len(tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
labelValues := make([]*metricspb.LabelValue, 0, len(tags))
|
||||
for _, tag_ := range tags {
|
||||
labelValues = append(labelValues, &metricspb.LabelValue{
|
||||
Value: tag_.Value,
|
||||
|
||||
// It is imperative that we set the "HasValue" attribute,
|
||||
// in order to distinguish missing a label from the empty string.
|
||||
// https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
|
||||
//
|
||||
// OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
|
||||
// so the best case that we can use to distinguish missing labels/tags from the
|
||||
// empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
|
||||
// a value.
|
||||
HasValue: tag_.Key.Name() != "",
|
||||
})
|
||||
}
|
||||
return labelValues
|
||||
}
|
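The new transform_stats_to_metrics.go above converts OpenCensus `view.Data` rows into `metricspb.Metric` messages whenever the ocagent exporter exports stats views. A minimal, hedged sketch of how an application typically registers that exporter so this conversion path runs (the service name and reporting period are illustrative assumptions, not taken from this PR):

```go
package main

import (
	"log"
	"time"

	"contrib.go.opencensus.io/exporter/ocagent"
	"go.opencensus.io/stats/view"
)

func main() {
	// Create an exporter that ships stats views to an OpenCensus agent.
	// Internally it translates each view.Data into a metricspb.Metric
	// (the code in transform_stats_to_metrics.go) before sending it.
	exp, err := ocagent.NewExporter(
		ocagent.WithInsecure(),
		ocagent.WithServiceName("example-service"), // illustrative name
	)
	if err != nil {
		log.Fatalf("failed to create ocagent exporter: %v", err)
	}
	defer exp.Stop()

	// Register the exporter and flush views every 10 seconds.
	view.RegisterExporter(exp)
	view.SetReportingPeriod(10 * time.Second)
}
```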
27 vendor/github.com/Azure/go-autorest/.travis.yml generated vendored
|
@ -3,31 +3,32 @@ sudo: false
|
|||
language: go
|
||||
|
||||
go:
|
||||
- master
|
||||
- 1.11.x
|
||||
- 1.10.x
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
|
||||
env:
|
||||
- DEP_VERSION="0.5.0"
|
||||
global:
|
||||
- DEP_RELEASE_TAG=v0.5.1 # so the script knows which version to use
|
||||
- GOSEC_RELEASE_TAG=1.3.0
|
||||
|
||||
before_install:
|
||||
- curl -L -o $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v$DEP_VERSION/dep-linux-amd64 && chmod +x $GOPATH/bin/dep
|
||||
- curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
|
||||
- curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $GOPATH/bin $GOSEC_RELEASE_TAG
|
||||
|
||||
install:
|
||||
- go get -u golang.org/x/lint/golint
|
||||
- go get -u github.com/stretchr/testify
|
||||
- go get -u github.com/GoASTScanner/gas
|
||||
- dep ensure
|
||||
- dep ensure -v
|
||||
- go install ./vendor/golang.org/x/lint/golint
|
||||
|
||||
script:
|
||||
- grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee /dev/stderr | test -z "$(< /dev/stdin)"
|
||||
- if [[ $TRAVIS_GO_VERSION == 1.11* ]]; then test -z "$(gofmt -s -l -w ./autorest/. | tee /dev/stderr)"; fi
|
||||
- test -z "$(golint ./autorest/... | tee /dev/stderr)"
|
||||
- go vet ./autorest/...
|
||||
- test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)"
|
||||
#- test -z "$(gosec ./autorest/... | tee /dev/stderr | grep Error)"
|
||||
- go build -v ./autorest/...
|
||||
- go test -race -v ./autorest/...
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $GOPATH/pkg/dep
|
||||
|
|
67 vendor/github.com/Azure/go-autorest/CHANGELOG.md generated vendored
|
@ -1,10 +1,75 @@
|
|||
# CHANGELOG
|
||||
|
||||
## v11.7.0
|
||||
|
||||
## New Features
|
||||
|
||||
- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package.
|
||||
|
||||
## v11.6.1
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Fix ACR DNS endpoint for government clouds.
|
||||
- Add Cosmos DB DNS endpoints.
|
||||
- Update dependencies to resolve build breaks in OpenCensus.
|
||||
|
||||
## v11.6.0
|
||||
|
||||
### New Features
|
||||
|
||||
- Added type `autorest.BasicAuthorizer` to support Basic authentication.
|
||||
|
||||
## v11.5.2
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Fixed `GetTokenFromCLI` did not work with zsh.
|
||||
|
||||
## v11.5.1
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2.
|
||||
|
||||
## v11.5.0
|
||||
|
||||
### New Features
|
||||
|
||||
- The `auth` package has been refactored so that the environment and file settings are now available.
|
||||
- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created.
|
||||
- Added support for certificate authorization for file-based config.
|
||||
|
||||
## v11.4.0
|
||||
|
||||
### New Features
|
||||
|
||||
- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests.
|
||||
- Exported `adal.UserAgent()` for parity with `autorest.Client`.
|
||||
|
||||
## v11.3.2
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline.
|
||||
|
||||
## v11.3.1
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases.
|
||||
|
||||
## v11.3.0
|
||||
|
||||
### New Features
|
||||
|
||||
- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type.
|
||||
|
||||
## v11.2.8
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
|
||||
- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
|
||||
|
||||
## v11.2.7
|
||||
|
||||
|
|
182 vendor/github.com/Azure/go-autorest/Gopkg.lock generated vendored
|
@ -2,93 +2,122 @@
|
|||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e1b859e3d9e90007d5fbf25edf57733b224f1857f6592636130afab3af8cfae7"
|
||||
digest = "1:bfd9c4a3ff18fa3715d243c6dff6465eaf3c9c8cd0a61d81ce74ca0f6eb239f9"
|
||||
name = "contrib.go.opencensus.io/exporter/ocagent"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d"
|
||||
version = "v0.2.0"
|
||||
pruneopts = "UT"
|
||||
revision = "902c0ccba68df93f7fefbe7e7c6f16be33108b40"
|
||||
version = "v0.4.9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e0a4505d5cf7ac6b5d92e3aee79d838b5f1ae8e9641ec7fa5d1e9b01d7a7ea95"
|
||||
digest = "1:fdb4ed936abeecb46a8c27dcac83f75c05c87a46d9ec7711411eb785c213fa02"
|
||||
name = "github.com/census-instrumentation/opencensus-proto"
|
||||
packages = [
|
||||
"gen-go/agent/common/v1",
|
||||
"gen-go/agent/metrics/v1",
|
||||
"gen-go/agent/trace/v1",
|
||||
"gen-go/metrics/v1",
|
||||
"gen-go/resource/v1",
|
||||
"gen-go/trace/v1",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "24333298e36590ea0716598caacc8959fc393c48"
|
||||
version = "v0.0.2"
|
||||
pruneopts = "UT"
|
||||
revision = "a105b96453fe85139acc07b68de48f2cbdd71249"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77"
|
||||
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = ""
|
||||
pruneopts = "UT"
|
||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6"
|
||||
digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
pruneopts = "UT"
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7f175a633086a933d1940a7e7dc2154a0070a7c25fb4a2f671f3eef1a34d1fd7"
|
||||
digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965"
|
||||
name = "github.com/dimchansky/utfbom"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c"
|
||||
version = "v1.0.0"
|
||||
pruneopts = "UT"
|
||||
revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
|
||||
digest = "1:489a99067cd08971bd9c1ee0055119ba8febc1429f9200ab0bec68d35e8c4833"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"jsonpb",
|
||||
"proto",
|
||||
"protoc-gen-go/descriptor",
|
||||
"protoc-gen-go/generator",
|
||||
"protoc-gen-go/generator/internal/remap",
|
||||
"protoc-gen-go/plugin",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/struct",
|
||||
"ptypes/timestamp",
|
||||
"ptypes/wrappers",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
|
||||
version = "v1.2.0"
|
||||
pruneopts = "UT"
|
||||
revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf"
|
||||
digest = "1:4cbbca3db0ff89197d000fb2fa0b90ca4516a7fbd4d8cd9fa4bebf17df484f6d"
|
||||
name = "github.com/grpc-ecosystem/grpc-gateway"
|
||||
packages = [
|
||||
"internal",
|
||||
"runtime",
|
||||
"utilities",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "20f268a412e5b342ebfb1a0eef7c3b7bd6c260ea"
|
||||
version = "v1.8.5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:67474f760e9ac3799f740db2c489e6423a4cde45520673ec123ac831ad849cb8"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = ["simplelru"]
|
||||
pruneopts = "UT"
|
||||
revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
|
||||
version = "v0.5.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
|
||||
name = "github.com/mitchellh/go-homedir"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
|
||||
version = "v1.0.0"
|
||||
pruneopts = "UT"
|
||||
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
|
||||
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
pruneopts = ""
|
||||
pruneopts = "UT"
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
|
||||
digest = "1:5da8ce674952566deae4dbc23d07c85caafc6cfa815b0b3e03e41979cedb8750"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"require",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
pruneopts = "UT"
|
||||
revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ad67dfd3799a2c58f6c65871dd141d8b53f61f600aec48ce8d7fa16a4d5476f8"
|
||||
digest = "1:006e728c96e02ac83ee246183a4b87b44b21e67e004d3a06edd891583a4bf197"
|
||||
name = "go.opencensus.io"
|
||||
packages = [
|
||||
".",
|
||||
|
@ -98,6 +127,7 @@
|
|||
"plugin/ochttp",
|
||||
"plugin/ochttp/propagation/b3",
|
||||
"plugin/ochttp/propagation/tracecontext",
|
||||
"resource",
|
||||
"stats",
|
||||
"stats/internal",
|
||||
"stats/view",
|
||||
|
@ -107,24 +137,35 @@
|
|||
"trace/propagation",
|
||||
"trace/tracestate",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b"
|
||||
version = "v0.18.0"
|
||||
pruneopts = "UT"
|
||||
revision = "3b8e2721f2c3c01fa1bf4a2e455874e7b8319cd7"
|
||||
version = "v0.19.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:78f41d38365ccef743e54ed854a2faf73313ba0750c621116a8eeb0395590bd0"
|
||||
digest = "1:994c4915a59f821705d08ea77b117ec7a3e6a46cc867fd194d887500dac1c3c2"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"pkcs12",
|
||||
"pkcs12/internal/rc2",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb"
|
||||
pruneopts = "UT"
|
||||
revision = "a5d413f7728c81fb97d96a2b722368945f651e78"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:547dcb6aebfb7fb17947660ebb034470c13f4d63d893def190a2f7ba3d09bc38"
|
||||
digest = "1:340d6f630598ebfa4e9ee059a137fe928e493e837efd7cbb663dc32fef1d2b0a"
|
||||
name = "golang.org/x/lint"
|
||||
packages = [
|
||||
".",
|
||||
"golint",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "d0100b6bd8b389f0385611eb39152c4d7c3a7905"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:4512c74e2c934a151fa9246cdc565d29611f53295bfb05e7a6275e80ee540a0c"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
|
@ -135,27 +176,27 @@
|
|||
"internal/timeseries",
|
||||
"trace",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "49bb7cea24b1df9410e1712aa6433dae904ff66a"
|
||||
pruneopts = "UT"
|
||||
revision = "e3b2ff56ed879efa686c9d09ed74f9bfe42f1a1d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
|
||||
digest = "1:75515eedc0dc2cb0b40372008b616fa2841d831c63eedd403285ff286c593295"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["semaphore"]
|
||||
pruneopts = ""
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
pruneopts = "UT"
|
||||
revision = "e225da77a7e68af35c70ccbf71af2b83e6acac3c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2ed0bf267e44950120acd95570227e28184573ffb099bd85b529ee148e004ddb"
|
||||
digest = "1:5637b3b43405643229e55140bb3049dfa7d73957ba9ecfccc31cdadbfbe0b1f9"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
pruneopts = ""
|
||||
revision = "fa43e7bc11baaae89f3f902b2b4d832b68234844"
|
||||
pruneopts = "UT"
|
||||
revision = "f49334f85ddcf0f08d7fb6dd7363e9e6d6b777eb"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
|
||||
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
|
@ -173,45 +214,67 @@
|
|||
"unicode/norm",
|
||||
"unicode/rangetable",
|
||||
]
|
||||
pruneopts = ""
|
||||
pruneopts = "UT"
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ca2e72555afbcebdead7c961b135650b5111dbbaf37a874de63976fdda57f129"
|
||||
digest = "1:a45ec3bb7c73e52430410dff3e0a5534ce518f72a8eb4355bc8502c546b91ecc"
|
||||
name = "golang.org/x/tools"
|
||||
packages = [
|
||||
"go/ast/astutil",
|
||||
"go/gcexportdata",
|
||||
"go/internal/gcimporter",
|
||||
"go/types/typeutil",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "5a8dccf5b48ae388eebac31f4077f14adda4dbc6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643"
|
||||
name = "google.golang.org/api"
|
||||
packages = ["support/bundler"]
|
||||
pruneopts = ""
|
||||
revision = "c51f30376ab7ec4f22b65de846a41593c8b70f07"
|
||||
pruneopts = "UT"
|
||||
revision = "e742f5a8defa1f9f5d723dfa04c962e680dc33f0"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:1b3b4ec811695907c4a3cb92e4f32834a4a42459bff7e02068b6b2b5344803cd"
|
||||
digest = "1:5dc13cd8c0b417e12d187938d2ccb2348dda90f6d3d4a3d8402a40677a5ac982"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
pruneopts = ""
|
||||
revision = "af9cb2a35e7f169ec875002c1829c9b315cddc04"
|
||||
packages = [
|
||||
"googleapis/api/httpbody",
|
||||
"googleapis/rpc/status",
|
||||
"protobuf/field_mask",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "e79c0c59cdb5e117ef82a6f885294df3d74065d5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:15656947b87a6a240e61dcfae9e71a55a8d5677f240d12ab48f02cdbabf1e309"
|
||||
digest = "1:c00eb80d7b152379c3e94c38d82b29deca98b1d0f53e4e20362589b7fcbffa07"
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [
|
||||
".",
|
||||
"balancer",
|
||||
"balancer/base",
|
||||
"balancer/roundrobin",
|
||||
"binarylog/grpc_binarylog_v1",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"credentials/internal",
|
||||
"encoding",
|
||||
"encoding/proto",
|
||||
"grpclog",
|
||||
"internal",
|
||||
"internal/backoff",
|
||||
"internal/binarylog",
|
||||
"internal/channelz",
|
||||
"internal/envconfig",
|
||||
"internal/grpcrand",
|
||||
"internal/grpcsync",
|
||||
"internal/syscall",
|
||||
"internal/transport",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
|
@ -224,9 +287,9 @@
|
|||
"status",
|
||||
"tap",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1"
|
||||
version = "v1.15.0"
|
||||
pruneopts = "UT"
|
||||
revision = "3507fb8e1a5ad030303c106fef3a47c9fdad16ad"
|
||||
version = "v1.19.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
|
@ -242,6 +305,7 @@
|
|||
"go.opencensus.io/stats/view",
|
||||
"go.opencensus.io/trace",
|
||||
"golang.org/x/crypto/pkcs12",
|
||||
"golang.org/x/lint/golint",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
9 vendor/github.com/Azure/go-autorest/Gopkg.toml generated vendored
|
@ -19,6 +19,11 @@
|
|||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
required = ["golang.org/x/lint/golint"]
|
||||
|
||||
[prune]
|
||||
unused-packages = true
|
||||
go-tests = true
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
|
@ -38,8 +43,8 @@
|
|||
|
||||
[[constraint]]
|
||||
name = "go.opencensus.io"
|
||||
version = "0.18.0"
|
||||
version = "0.19.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "contrib.go.opencensus.io/exporter/ocagent"
|
||||
version = "0.2.0"
|
||||
version = "0.4.5"
|
||||
|
|
2 vendor/github.com/Azure/go-autorest/autorest/adal/token.go generated vendored
@ -796,7 +796,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
req.Header.Add("User-Agent", userAgent())
req.Header.Add("User-Agent", UserAgent())
req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
v := url.Values{}
12 vendor/github.com/Azure/go-autorest/autorest/adal/version.go generated vendored
@ -30,6 +30,16 @@ var (
)
)

func userAgent() string {
// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
func UserAgent() string {
return ua
}

// AddToUserAgent adds an extension to the current user agent
func AddToUserAgent(extension string) error {
if extension != "" {
ua = fmt.Sprintf("%s %s", ua, extension)
return nil
}
return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
}
27 vendor/github.com/Azure/go-autorest/autorest/authorization.go generated vendored
|
@ -15,6 +15,7 @@ package autorest
|
|||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
@ -31,6 +32,8 @@ const (
|
|||
apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
|
||||
bingAPISdkHeader = "X-BingApis-SDK-Client"
|
||||
golangBingAPISdkHeaderValue = "Go-SDK"
|
||||
authorization = "Authorization"
|
||||
basic = "Basic"
|
||||
)
|
||||
|
||||
// Authorizer is the interface that provides a PrepareDecorator used to supply request
|
||||
|
@ -258,3 +261,27 @@ func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|||
}
|
||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
||||
}
|
||||
|
||||
// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
|
||||
// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
|
||||
type BasicAuthorizer struct {
|
||||
userName string
|
||||
password string
|
||||
}
|
||||
|
||||
// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
|
||||
func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
|
||||
return &BasicAuthorizer{
|
||||
userName: userName,
|
||||
password: password,
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
||||
// value is "Basic " followed by the base64-encoded username:password tuple.
|
||||
func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
headers := make(map[string]interface{})
|
||||
headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
|
||||
|
||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
||||
}
|
||||
|
|
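The `BasicAuthorizer` added above slots into the standard autorest request-preparation flow. A short, hedged sketch of how it might be wired into a client (the helper functions and user-agent string are illustrative, not part of this diff):

```go
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// newBasicClient returns an autorest.Client whose requests carry an
// "Authorization: Basic <base64(user:pass)>" header.
func newBasicClient(user, pass string) autorest.Client {
	c := autorest.NewClientWithUserAgent("example-agent") // illustrative UA
	c.Authorizer = autorest.NewBasicAuthorizer(user, pass)
	return c
}

// prepare applies the client's authorizer (the PrepareDecorator shown
// above) to an outgoing request.
func prepare(c autorest.Client, req *http.Request) (*http.Request, error) {
	return autorest.Prepare(req, c.WithAuthorization())
}
```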
10 vendor/github.com/Azure/go-autorest/autorest/azure/async.go generated vendored
|
@ -181,6 +181,10 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e
|
|||
// running operation has completed, the provided context is cancelled, or the client's
|
||||
// polling duration has been exceeded. It will retry failed polling attempts based on
|
||||
// the retry value defined in the client up to the maximum retry attempts.
|
||||
// If no deadline is specified in the context then the client.PollingDuration will be
|
||||
// used to determine if a default deadline should be used.
|
||||
// If PollingDuration is greater than zero the value will be used as the context's timeout.
|
||||
// If PollingDuration is zero then no default deadline will be used.
|
||||
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
|
||||
ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
|
||||
defer func() {
|
||||
|
@ -192,7 +196,9 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
|
|||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
cancelCtx := ctx
|
||||
if d := client.PollingDuration; d != 0 {
|
||||
// if the provided context already has a deadline don't override it
|
||||
_, hasDeadline := ctx.Deadline()
|
||||
if d := client.PollingDuration; !hasDeadline && d != 0 {
|
||||
var cancel context.CancelFunc
|
||||
cancelCtx, cancel = context.WithTimeout(ctx, d)
|
||||
defer cancel()
|
||||
|
@ -824,8 +830,6 @@ func (pt *pollingTrackerPut) updatePollingMethod() error {
|
|||
pt.URI = lh
|
||||
pt.Pm = PollingLocation
|
||||
}
|
||||
// when both headers are returned we use the value in the Location header for the final GET
|
||||
pt.FinalGetURI = lh
|
||||
}
|
||||
// make sure a polling URL was found
|
||||
if pt.URI == "" {
|
||||
|
|
13 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go generated vendored
|
@ -54,6 +54,7 @@ type Environment struct {
|
|||
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
||||
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
||||
CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
|
||||
TokenAudience string `json:"tokenAudience"`
|
||||
}
|
||||
|
||||
|
@ -79,6 +80,7 @@ var (
|
|||
ServiceManagementVMDNSSuffix: "cloudapp.net",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
CosmosDBDNSSuffix: "documents.azure.com",
|
||||
TokenAudience: "https://management.azure.com/",
|
||||
}
|
||||
|
||||
|
@ -102,7 +104,8 @@ var (
|
|||
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
|
||||
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
ContainerRegistryDNSSuffix: "azurecr.us",
|
||||
CosmosDBDNSSuffix: "documents.azure.us",
|
||||
TokenAudience: "https://management.usgovcloudapi.net/",
|
||||
}
|
||||
|
||||
|
@ -126,7 +129,8 @@ var (
|
|||
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
|
||||
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
ContainerRegistryDNSSuffix: "azurecr.cn",
|
||||
CosmosDBDNSSuffix: "documents.azure.cn",
|
||||
TokenAudience: "https://management.chinacloudapi.cn/",
|
||||
}
|
||||
|
||||
|
@ -150,8 +154,9 @@ var (
|
|||
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
|
||||
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
TokenAudience: "https://management.microsoftazure.de/",
|
||||
// ContainerRegistryDNSSuffix: "", ACR not present yet in the German Cloud
|
||||
CosmosDBDNSSuffix: "documents.microsoftazure.de",
|
||||
TokenAudience: "https://management.microsoftazure.de/",
|
||||
}
|
||||
)
|
||||
|
||||
|
|
6 vendor/github.com/Azure/go-autorest/autorest/client.go generated vendored
|
@ -16,6 +16,7 @@ package autorest
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -230,6 +231,11 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
|
|||
func (c Client) sender() Sender {
|
||||
if c.Sender == nil {
|
||||
j, _ := cookiejar.New(nil)
|
||||
tracing.Transport.Base = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
},
|
||||
}
|
||||
client := &http.Client{Jar: j, Transport: tracing.Transport}
|
||||
return client
|
||||
}
|
||||
|
|
2 vendor/github.com/Azure/go-autorest/autorest/version.go generated vendored
@ -19,7 +19,7 @@ import (
"runtime"
)

const number = "v11.2.8"
const number = "v11.7.0"

var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
5 vendor/github.com/PuerkitoBio/purell/.gitignore generated vendored Normal file
|
@ -0,0 +1,5 @@
|
|||
*.sublime-*
|
||||
.DS_Store
|
||||
*.swp
|
||||
*.swo
|
||||
tags
|
12 vendor/github.com/PuerkitoBio/purell/.travis.yml generated vendored Normal file
|
@ -0,0 +1,12 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.4.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- "1.10.x"
|
||||
- "1.11.x"
|
||||
- tip
|
12 vendor/github.com/PuerkitoBio/purell/LICENSE generated vendored Normal file
|
@ -0,0 +1,12 @@
|
|||
Copyright (c) 2012, Martin Angers
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
188 vendor/github.com/PuerkitoBio/purell/README.md generated vendored Normal file
|
@ -0,0 +1,188 @@
|
|||
# Purell
|
||||
|
||||
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
|
||||
|
||||
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
|
||||
|
||||
[](http://travis-ci.org/PuerkitoBio/purell)
|
||||
|
||||
## Install
|
||||
|
||||
`go get github.com/PuerkitoBio/purell`
|
||||
|
||||
## Changelog
|
||||
|
||||
* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor).
|
||||
* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
|
||||
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
|
||||
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
|
||||
* **v0.2.0** : Add benchmarks, Attempt IDN support.
|
||||
* **v0.1.0** : Initial release.
|
||||
|
||||
## Examples
|
||||
|
||||
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
|
||||
|
||||
```go
|
||||
package purell
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func ExampleNormalizeURLString() {
|
||||
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
|
||||
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
// Output: http://somewebsite.com:80/Amazing%3F/url/
|
||||
}
|
||||
|
||||
func ExampleMustNormalizeURLString() {
|
||||
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
|
||||
FlagsUnsafeGreedy)
|
||||
fmt.Print(normalized)
|
||||
|
||||
// Output: http://somewebsite.com/Amazing%FA/url
|
||||
}
|
||||
|
||||
func ExampleNormalizeURL() {
|
||||
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
|
||||
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
|
||||
}
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
|
||||
|
||||
```go
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
```
|
||||
|
||||
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
|
||||
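To make the flag-composition note above concrete, a small hedged sketch of building a custom set (the example URL and expected output are illustrative):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Custom set: the safe normalizations plus query sorting, with the
	// default-port removal dropped via the bitwise AND NOT operator.
	flags := (purell.FlagsSafe | purell.FlagSortQuery) &^ purell.FlagRemoveDefaultPort

	normalized, err := purell.NormalizeURLString("HTTP://Example.com:80/a?b=2&a=1", flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(normalized) // http://example.com:80/a?a=1&b=2
}
```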
|
||||
The [full godoc reference is available on gopkgdoc][godoc].
|
||||
|
||||
Some things to note:
|
||||
|
||||
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
|
||||
|
||||
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
|
||||
- %24 -> $
|
||||
- %26 -> &
|
||||
- %2B-%3B -> +,-./0123456789:;
|
||||
- %3D -> =
|
||||
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
- %5F -> _
|
||||
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
|
||||
- %7E -> ~
|
||||
|
||||
|
||||
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
|
||||
|
||||
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
|
||||
|
||||
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
|
||||
|
||||
### Safe vs Usually Safe vs Unsafe
|
||||
|
||||
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
|
||||
|
||||
Consider the following URL:
|
||||
|
||||
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
Normalizing with the `FlagsSafe` gives:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
With the `FlagsUsuallySafeGreedy`:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
And with `FlagsUnsafeGreedy`:
|
||||
|
||||
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
|
||||
|
||||
## TODOs
|
||||
|
||||
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
|
||||
|
||||
## Thanks / Contributions
|
||||
|
||||
@rogpeppe
|
||||
@jehiah
|
||||
@opennota
|
||||
@pchristopher1275
|
||||
@zenovich
|
||||
@beeker1121
|
||||
|
||||
## License
|
||||
|
||||
The [BSD 3-Clause license][bsd].
|
||||
|
||||
[bsd]: http://opensource.org/licenses/BSD-3-Clause
|
||||
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
|
||||
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
|
||||
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
|
||||
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
|
||||
[iss7]: https://github.com/PuerkitoBio/purell/issues/7
|
379 vendor/github.com/PuerkitoBio/purell/purell.go generated vendored Normal file
|
@ -0,0 +1,379 @@
|
|||
/*
|
||||
Package purell offers URL normalization as described on the wikipedia page:
|
||||
http://en.wikipedia.org/wiki/URL_normalization
|
||||
*/
|
||||
package purell
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/PuerkitoBio/urlesc"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
"golang.org/x/text/width"
|
||||
)
|
||||
|
||||
// A set of normalization flags determines how a URL will
|
||||
// be normalized.
|
||||
type NormalizationFlags uint
|
||||
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHttpPort = ":80"
|
||||
defaultHttpsPort = ":443"
|
||||
)
|
||||
|
||||
// Regular expressions used by the normalizations
|
||||
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
|
||||
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
|
||||
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
||||
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
|
||||
var rxEmptyPort = regexp.MustCompile(`:+$`)
|
||||
|
||||
// Map of flags to implementation function.
|
||||
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
|
||||
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
|
||||
|
||||
// Since maps have undefined traversing order, make a slice of ordered keys
|
||||
var flagsOrder = []NormalizationFlags{
|
||||
FlagLowercaseScheme,
|
||||
FlagLowercaseHost,
|
||||
FlagRemoveDefaultPort,
|
||||
FlagRemoveDirectoryIndex,
|
||||
FlagRemoveDotSegments,
|
||||
FlagRemoveFragment,
|
||||
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
|
||||
FlagRemoveDuplicateSlashes,
|
||||
FlagRemoveWWW,
|
||||
FlagAddWWW,
|
||||
FlagSortQuery,
|
||||
FlagDecodeDWORDHost,
|
||||
FlagDecodeOctalHost,
|
||||
FlagDecodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
|
||||
FlagAddTrailingSlash,
|
||||
}
|
||||
|
||||
// ... and then the map, where order is unimportant
|
||||
var flags = map[NormalizationFlags]func(*url.URL){
|
||||
FlagLowercaseScheme: lowercaseScheme,
|
||||
FlagLowercaseHost: lowercaseHost,
|
||||
FlagRemoveDefaultPort: removeDefaultPort,
|
||||
FlagRemoveDirectoryIndex: removeDirectoryIndex,
|
||||
FlagRemoveDotSegments: removeDotSegments,
|
||||
FlagRemoveFragment: removeFragment,
|
||||
FlagForceHTTP: forceHTTP,
|
||||
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
|
||||
FlagRemoveWWW: removeWWW,
|
||||
FlagAddWWW: addWWW,
|
||||
FlagSortQuery: sortQuery,
|
||||
FlagDecodeDWORDHost: decodeDWORDHost,
|
||||
FlagDecodeOctalHost: decodeOctalHost,
|
||||
FlagDecodeHexHost: decodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash: removeTrailingSlash,
|
||||
FlagAddTrailingSlash: addTrailingSlash,
|
||||
}
|
||||
|
||||
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func MustNormalizeURLString(u string, f NormalizationFlags) string {
|
||||
result, e := NormalizeURLString(u, f)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
|
||||
parsed, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if f&FlagLowercaseHost == FlagLowercaseHost {
|
||||
parsed.Host = strings.ToLower(parsed.Host)
|
||||
}
|
||||
|
||||
// The idna package doesn't fully conform to RFC 5895
|
||||
// (https://tools.ietf.org/html/rfc5895), so we do it here.
|
||||
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
|
||||
// TODO: Remove when (if?) idna package conforms to RFC 5895.
|
||||
parsed.Host = width.Fold.String(parsed.Host)
|
||||
parsed.Host = norm.NFC.String(parsed.Host)
|
||||
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return NormalizeURL(parsed, f), nil
|
||||
}
|
||||
|
||||
// NormalizeURL returns the normalized string.
|
||||
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
	for _, k := range flagsOrder {
		if f&k == k {
			flags[k](u)
		}
	}
	return urlesc.Escape(u)
}

func lowercaseScheme(u *url.URL) {
	if len(u.Scheme) > 0 {
		u.Scheme = strings.ToLower(u.Scheme)
	}
}

func lowercaseHost(u *url.URL) {
	if len(u.Host) > 0 {
		u.Host = strings.ToLower(u.Host)
	}
}

func removeDefaultPort(u *url.URL) {
	if len(u.Host) > 0 {
		scheme := strings.ToLower(u.Scheme)
		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
			if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
				return ""
			}
			return val
		})
	}
}

func removeTrailingSlash(u *url.URL) {
	if l := len(u.Path); l > 0 {
		if strings.HasSuffix(u.Path, "/") {
			u.Path = u.Path[:l-1]
		}
	} else if l = len(u.Host); l > 0 {
		if strings.HasSuffix(u.Host, "/") {
			u.Host = u.Host[:l-1]
		}
	}
}

func addTrailingSlash(u *url.URL) {
	if l := len(u.Path); l > 0 {
		if !strings.HasSuffix(u.Path, "/") {
			u.Path += "/"
		}
	} else if l = len(u.Host); l > 0 {
		if !strings.HasSuffix(u.Host, "/") {
			u.Host += "/"
		}
	}
}

func removeDotSegments(u *url.URL) {
	if len(u.Path) > 0 {
		var dotFree []string
		var lastIsDot bool

		sections := strings.Split(u.Path, "/")
		for _, s := range sections {
			if s == ".." {
				if len(dotFree) > 0 {
					dotFree = dotFree[:len(dotFree)-1]
				}
			} else if s != "." {
				dotFree = append(dotFree, s)
			}
			lastIsDot = (s == "." || s == "..")
		}
		// Special case if host does not end with / and new path does not begin with /
		u.Path = strings.Join(dotFree, "/")
		if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
			u.Path = "/" + u.Path
		}
		// Special case if the last segment was a dot, make sure the path ends with a slash
		if lastIsDot && !strings.HasSuffix(u.Path, "/") {
			u.Path += "/"
		}
	}
}

func removeDirectoryIndex(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
	}
}

func removeFragment(u *url.URL) {
	u.Fragment = ""
}

func forceHTTP(u *url.URL) {
	if strings.ToLower(u.Scheme) == "https" {
		u.Scheme = "http"
	}
}

func removeDuplicateSlashes(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
	}
}

func removeWWW(u *url.URL) {
	if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
		u.Host = u.Host[4:]
	}
}

func addWWW(u *url.URL) {
	if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
		u.Host = "www." + u.Host
	}
}

func sortQuery(u *url.URL) {
	q := u.Query()

	if len(q) > 0 {
		arKeys := make([]string, len(q))
		i := 0
		for k := range q {
			arKeys[i] = k
			i++
		}
		sort.Strings(arKeys)
		buf := new(bytes.Buffer)
		for _, k := range arKeys {
			sort.Strings(q[k])
			for _, v := range q[k] {
				if buf.Len() > 0 {
					buf.WriteRune('&')
				}
				buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
			}
		}

		// Rebuild the raw query string
		u.RawQuery = buf.String()
	}
}

func decodeDWORDHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			var parts [4]int64

			dword, _ := strconv.ParseInt(matches[1], 10, 0)
			for i, shift := range []uint{24, 16, 8, 0} {
				parts[i] = dword >> shift & 0xFF
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
		}
	}
}

func decodeOctalHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
			var parts [4]int64

			for i := 1; i <= 4; i++ {
				parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
		}
	}
}

func decodeHexHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			// Conversion is safe because of regex validation
			parsed, _ := strconv.ParseInt(matches[1], 16, 0)
			// Set host as DWORD (base 10) encoded host
			u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
			// The rest is the same as decoding a DWORD host
			decodeDWORDHost(u)
		}
	}
}

func removeUnncessaryHostDots(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
			// Trim the leading and trailing dots
			u.Host = strings.Trim(matches[1], ".")
			if len(matches) > 2 {
				u.Host += matches[2]
			}
		}
	}
}

func removeEmptyPortSeparator(u *url.URL) {
	if len(u.Host) > 0 {
		u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
	}
}
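For orientation, a minimal usage sketch of the NormalizeURL entry point shown above, as it would typically be called against the vendored PuerkitoBio/purell package this file belongs to. The flag constants used here (FlagsUsuallySafeGreedy, FlagRemoveFragment, FlagSortQuery) are assumed to be among the NormalizationFlags values declared earlier in the file and are not part of this hunk.

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// A deliberately messy URL: mixed-case scheme/host, default port,
	// dot segments, unsorted query and a fragment.
	u, err := url.Parse("HTTP://www.Example.com:80/a/b/../c/?b=2&a=1#frag")
	if err != nil {
		panic(err)
	}

	// Assumed flag constants; adjust to whichever normalizations are wanted.
	flags := purell.FlagsUsuallySafeGreedy | purell.FlagRemoveFragment | purell.FlagSortQuery

	// NormalizeURL applies each enabled normalization in flagsOrder and
	// re-serializes the URL with urlesc.Escape.
	fmt.Println(purell.NormalizeURL(u, flags))
}
```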
15
vendor/github.com/PuerkitoBio/urlesc/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,15 @@
language: go

go:
- 1.4.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- tip

install:
- go build .

script:
- go test -v
27
vendor/github.com/PuerkitoBio/urlesc/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
16
vendor/github.com/PuerkitoBio/urlesc/README.md
generated
vendored
Normal file
@ -0,0 +1,16 @@
urlesc [](https://travis-ci.org/PuerkitoBio/urlesc) [](http://godoc.org/github.com/PuerkitoBio/urlesc)
======

Package urlesc implements query escaping as per RFC 3986.

It contains some parts of the net/url package, modified so as to allow
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).

## Install

    go get github.com/PuerkitoBio/urlesc

## License

Go license (BSD-3-Clause)
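The README above documents only the install step; a short sketch of the two exported helpers defined in urlesc.go below (QueryEscape and Escape) may be clearer:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// QueryEscape escapes a single query component per RFC 3986.
	fmt.Println(urlesc.QueryEscape("a=1&b=two words"))

	// Escape re-serializes a parsed URL without over-escaping the reserved
	// characters that net/url's String() would escape (golang/go#5684).
	u, err := url.Parse("https://user:pw@example.com/path with space?q=1")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u))
}
```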
180
vendor/github.com/PuerkitoBio/urlesc/urlesc.go
generated
vendored
Normal file
|
@ -0,0 +1,180 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package urlesc implements query escaping as per RFC 3986.
|
||||
// It contains some parts of the net/url package, modified so as to allow
|
||||
// some reserved characters incorrectly escaped by net/url.
|
||||
// See https://github.com/golang/go/issues/5684
|
||||
package urlesc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type encoding int
|
||||
|
||||
const (
|
||||
encodePath encoding = 1 + iota
|
||||
encodeUserPassword
|
||||
encodeQueryComponent
|
||||
encodeFragment
|
||||
)
|
||||
|
||||
// Return true if the specified character should be escaped when
|
||||
// appearing in a URL string, according to RFC 3986.
|
||||
func shouldEscape(c byte, mode encoding) bool {
|
||||
// §2.3 Unreserved characters (alphanum)
|
||||
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
|
||||
return false
|
||||
}
|
||||
|
||||
switch c {
|
||||
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
|
||||
return false
|
||||
|
||||
// §2.2 Reserved characters (reserved)
|
||||
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
|
||||
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
|
||||
// Different sections of the URL allow a few of
|
||||
// the reserved characters to appear unescaped.
|
||||
switch mode {
|
||||
case encodePath: // §3.3
|
||||
// The RFC allows sub-delims and : @.
|
||||
// '/', '[' and ']' can be used to assign meaning to individual path
|
||||
// segments. This package only manipulates the path as a whole,
|
||||
// so we allow those as well. That leaves only ? and # to escape.
|
||||
return c == '?' || c == '#'
|
||||
|
||||
case encodeUserPassword: // §3.2.1
|
||||
// The RFC allows : and sub-delims in
|
||||
// userinfo. The parsing of userinfo treats ':' as special so we must escape
|
||||
// all the gen-delims.
|
||||
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
|
||||
|
||||
case encodeQueryComponent: // §3.4
|
||||
// The RFC allows / and ?.
|
||||
return c != '/' && c != '?'
|
||||
|
||||
case encodeFragment: // §4.1
|
||||
// The RFC text is silent but the grammar allows
|
||||
// everything, so escape nothing but #
|
||||
return c == '#'
|
||||
}
|
||||
}
|
||||
|
||||
// Everything else must be escaped.
|
||||
return true
|
||||
}
|
||||
|
||||
// QueryEscape escapes the string so it can be safely placed
|
||||
// inside a URL query.
|
||||
func QueryEscape(s string) string {
|
||||
return escape(s, encodeQueryComponent)
|
||||
}
|
||||
|
||||
func escape(s string, mode encoding) string {
|
||||
spaceCount, hexCount := 0, 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if shouldEscape(c, mode) {
|
||||
if c == ' ' && mode == encodeQueryComponent {
|
||||
spaceCount++
|
||||
} else {
|
||||
hexCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if spaceCount == 0 && hexCount == 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
t := make([]byte, len(s)+2*hexCount)
|
||||
j := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; {
|
||||
case c == ' ' && mode == encodeQueryComponent:
|
||||
t[j] = '+'
|
||||
j++
|
||||
case shouldEscape(c, mode):
|
||||
t[j] = '%'
|
||||
t[j+1] = "0123456789ABCDEF"[c>>4]
|
||||
t[j+2] = "0123456789ABCDEF"[c&15]
|
||||
j += 3
|
||||
default:
|
||||
t[j] = s[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(t)
|
||||
}
|
||||
|
||||
var uiReplacer = strings.NewReplacer(
|
||||
"%21", "!",
|
||||
"%27", "'",
|
||||
"%28", "(",
|
||||
"%29", ")",
|
||||
"%2A", "*",
|
||||
)
|
||||
|
||||
// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
|
||||
func unescapeUserinfo(s string) string {
|
||||
return uiReplacer.Replace(s)
|
||||
}
|
||||
|
||||
// Escape reassembles the URL into a valid URL string.
|
||||
// The general form of the result is one of:
|
||||
//
|
||||
// scheme:opaque
|
||||
// scheme://userinfo@host/path?query#fragment
|
||||
//
|
||||
// If u.Opaque is non-empty, String uses the first form;
|
||||
// otherwise it uses the second form.
|
||||
//
|
||||
// In the second form, the following rules apply:
|
||||
// - if u.Scheme is empty, scheme: is omitted.
|
||||
// - if u.User is nil, userinfo@ is omitted.
|
||||
// - if u.Host is empty, host/ is omitted.
|
||||
// - if u.Scheme and u.Host are empty and u.User is nil,
|
||||
// the entire scheme://userinfo@host/ is omitted.
|
||||
// - if u.Host is non-empty and u.Path begins with a /,
|
||||
// the form host/path does not add its own /.
|
||||
// - if u.RawQuery is empty, ?query is omitted.
|
||||
// - if u.Fragment is empty, #fragment is omitted.
|
||||
func Escape(u *url.URL) string {
|
||||
var buf bytes.Buffer
|
||||
if u.Scheme != "" {
|
||||
buf.WriteString(u.Scheme)
|
||||
buf.WriteByte(':')
|
||||
}
|
||||
if u.Opaque != "" {
|
||||
buf.WriteString(u.Opaque)
|
||||
} else {
|
||||
if u.Scheme != "" || u.Host != "" || u.User != nil {
|
||||
buf.WriteString("//")
|
||||
if ui := u.User; ui != nil {
|
||||
buf.WriteString(unescapeUserinfo(ui.String()))
|
||||
buf.WriteByte('@')
|
||||
}
|
||||
if h := u.Host; h != "" {
|
||||
buf.WriteString(h)
|
||||
}
|
||||
}
|
||||
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
|
||||
buf.WriteByte('/')
|
||||
}
|
||||
buf.WriteString(escape(u.Path, encodePath))
|
||||
}
|
||||
if u.RawQuery != "" {
|
||||
buf.WriteByte('?')
|
||||
buf.WriteString(u.RawQuery)
|
||||
}
|
||||
if u.Fragment != "" {
|
||||
buf.WriteByte('#')
|
||||
buf.WriteString(escape(u.Fragment, encodeFragment))
|
||||
}
|
||||
return buf.String()
|
||||
}
|
1
vendor/github.com/Sirupsen/logrus/.travis.yml
generated
vendored
|
@ -1,4 +1,5 @@
|
|||
language: go
|
||||
go_import_path: github.com/sirupsen/logrus
|
||||
env:
|
||||
- GOMAXPROCS=4 GORACE=halt_on_error=1
|
||||
matrix:
|
||||
|
|
25
vendor/github.com/Sirupsen/logrus/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,28 @@
|
|||
# 1.4.0
|
||||
This new release introduces:
|
||||
* Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
|
||||
* Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
|
||||
* Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
|
||||
|
||||
Fixes:
|
||||
* Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
|
||||
* Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
|
||||
* Fix infinite recursion on unknown `Level.String()` (#907)
|
||||
* Fix race condition in `getCaller` (#916).
|
||||
|
||||
|
||||
# 1.3.0
|
||||
This new release introduces:
|
||||
* Log, Logf, Logln functions for Logger and Entry that take a Level
|
||||
|
||||
Fixes:
|
||||
* Building prometheus node_exporter on AIX (#840)
|
||||
* Race condition in TextFormatter (#468)
|
||||
* Travis CI import path (#868)
|
||||
* Remove coloured output on Windows (#862)
|
||||
* Pointer to func as field in JSONFormatter (#870)
|
||||
* Properly marshal Levels (#873)
|
||||
|
||||
# 1.2.0
|
||||
This new release introduces:
|
||||
* A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
|
||||
|
|
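The 1.4.0 entries above introduce `Entry.Context` and `WithContext`; a minimal sketch of how a hook might consume that context. The hook type and context key are hypothetical, and AddHook/AllLevels are standard logrus helpers not shown in this diff.

```go
package main

import (
	"context"

	log "github.com/sirupsen/logrus"
)

// ctxKey is a hypothetical context key type used only for this example.
type ctxKey string

// requestIDHook copies a request ID from the entry's context into its fields.
type requestIDHook struct{}

func (requestIDHook) Levels() []log.Level { return log.AllLevels }

func (requestIDHook) Fire(e *log.Entry) error {
	if e.Context != nil {
		if id, ok := e.Context.Value(ctxKey("request-id")).(string); ok {
			e.Data["request_id"] = id
		}
	}
	return nil
}

func main() {
	log.AddHook(requestIDHook{})

	ctx := context.WithValue(context.Background(), ctxKey("request-id"), "abc-123")
	// WithContext (new in 1.4.0) attaches ctx to the entry; the hook reads it in Fire.
	log.WithContext(ctx).Info("handling request")
}
```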
2
vendor/github.com/Sirupsen/logrus/README.md
generated
vendored
|
@ -361,9 +361,11 @@ The built-in logging formatters are:
|
|||
Third party logging formatters:
|
||||
|
||||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
|
||||
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
|
||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
|
|
18
vendor/github.com/Sirupsen/logrus/alt_exit.go
generated
vendored
|
@ -51,9 +51,9 @@ func Exit(code int) {
|
|||
os.Exit(code)
|
||||
}
|
||||
|
||||
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
|
||||
// all handlers. The handlers will also be invoked when any Fatal log entry is
|
||||
// made.
|
||||
// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
|
||||
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
|
||||
// any Fatal log entry is made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
|
@ -62,3 +62,15 @@ func Exit(code int) {
|
|||
func RegisterExitHandler(handler func()) {
|
||||
handlers = append(handlers, handler)
|
||||
}
|
||||
|
||||
// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
|
||||
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
|
||||
// any Fatal log entry is made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
// closing database connections, or sending an alert that the application is
|
||||
// closing.
|
||||
func DeferExitHandler(handler func()) {
|
||||
handlers = append([]func(){handler}, handlers...)
|
||||
}
|
||||
|
|
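Given the doc comments above, a small sketch of the ordering difference between the two registration helpers (both defined in this file) when logrus.Exit or a Fatal* call fires them:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	// RegisterExitHandler appends: handlers run in registration order.
	log.RegisterExitHandler(func() { fmt.Println("flush log buffers") })

	// DeferExitHandler (new) prepends: it runs before previously registered
	// handlers, similar to defer semantics.
	log.DeferExitHandler(func() { fmt.Println("close database connections") })

	// Exit invokes every handler, then calls os.Exit with the given code.
	log.Exit(0)
}
```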
158
vendor/github.com/Sirupsen/logrus/entry.go
generated
vendored
|
@ -2,6 +2,7 @@ package logrus
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
|
@ -69,6 +70,9 @@ type Entry struct {
|
|||
// When formatter is called in entry.log(), a Buffer may be set to entry
|
||||
Buffer *bytes.Buffer
|
||||
|
||||
// Contains the context set by the user. Useful for hook processing etc.
|
||||
Context context.Context
|
||||
|
||||
// err may contain a field formatting error
|
||||
err string
|
||||
}
|
||||
|
@ -97,6 +101,12 @@ func (entry *Entry) WithError(err error) *Entry {
|
|||
return entry.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// Add a context to the Entry.
|
||||
func (entry *Entry) WithContext(ctx context.Context) *Entry {
|
||||
entry.Context = ctx
|
||||
return entry
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
|
@ -108,23 +118,34 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
|
|||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
var field_err string
|
||||
fieldErr := entry.err
|
||||
for k, v := range fields {
|
||||
if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
|
||||
field_err = fmt.Sprintf("can not add field %q", k)
|
||||
if entry.err != "" {
|
||||
field_err = entry.err + ", " + field_err
|
||||
isErrField := false
|
||||
if t := reflect.TypeOf(v); t != nil {
|
||||
switch t.Kind() {
|
||||
case reflect.Func:
|
||||
isErrField = true
|
||||
case reflect.Ptr:
|
||||
isErrField = t.Elem().Kind() == reflect.Func
|
||||
}
|
||||
}
|
||||
if isErrField {
|
||||
tmp := fmt.Sprintf("can not add field %q", k)
|
||||
if fieldErr != "" {
|
||||
fieldErr = entry.err + ", " + tmp
|
||||
} else {
|
||||
fieldErr = tmp
|
||||
}
|
||||
} else {
|
||||
data[k] = v
|
||||
}
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
|
||||
}
|
||||
|
||||
// Overrides the time of the Entry.
|
||||
func (entry *Entry) WithTime(t time.Time) *Entry {
|
||||
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
|
||||
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
|
||||
}
|
||||
|
||||
// getPackageName reduces a fully qualified function name to the package name
|
||||
|
@ -145,20 +166,23 @@ func getPackageName(f string) string {
|
|||
|
||||
// getCaller retrieves the name of the first non-logrus calling function
|
||||
func getCaller() *runtime.Frame {
|
||||
|
||||
// cache this package's fully-qualified name
|
||||
callerInitOnce.Do(func() {
|
||||
pcs := make([]uintptr, 2)
|
||||
_ = runtime.Callers(0, pcs)
|
||||
logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
|
||||
|
||||
// now that we have the cache, we can skip a minimum count of known-logrus functions
|
||||
// XXX this is dubious, the number of frames may vary
|
||||
minimumCallerDepth = knownLogrusFrames
|
||||
})
|
||||
|
||||
// Restrict the lookback frames to avoid runaway lookups
|
||||
pcs := make([]uintptr, maximumCallerDepth)
|
||||
depth := runtime.Callers(minimumCallerDepth, pcs)
|
||||
frames := runtime.CallersFrames(pcs[:depth])
|
||||
|
||||
// cache this package's fully-qualified name
|
||||
callerInitOnce.Do(func() {
|
||||
logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
|
||||
|
||||
// now that we have the cache, we can skip a minimum count of known-logrus functions
|
||||
// XXX this is dubious, the number of frames may vary store an entry in a logger interface
|
||||
minimumCallerDepth = knownLogrusFrames
|
||||
})
|
||||
|
||||
for f, again := frames.Next(); again; f, again = frames.Next() {
|
||||
pkg := getPackageName(f.Function)
|
||||
|
||||
|
@ -240,16 +264,18 @@ func (entry *Entry) write() {
|
|||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Trace(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(TraceLevel) {
|
||||
entry.log(TraceLevel, fmt.Sprint(args...))
|
||||
func (entry *Entry) Log(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.log(level, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Trace(args ...interface{}) {
|
||||
entry.Log(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
|
@ -257,15 +283,11 @@ func (entry *Entry) Print(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
|
@ -273,43 +295,37 @@ func (entry *Entry) Warning(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(PanicLevel, args...)
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Tracef(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(TraceLevel) {
|
||||
entry.Trace(fmt.Sprintf(format, args...))
|
||||
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Tracef(format string, args ...interface{}) {
|
||||
entry.Logf(TraceLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(DebugLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(InfoLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
|
@ -317,9 +333,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(WarnLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
|
@ -327,42 +341,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(ErrorLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(FatalLevel, format, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(PanicLevel, format, args...)
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Traceln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(TraceLevel) {
|
||||
entry.Trace(entry.sprintlnn(args...))
|
||||
func (entry *Entry) Logln(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Traceln(args ...interface{}) {
|
||||
entry.Logln(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
|
@ -370,9 +378,7 @@ func (entry *Entry) Println(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
|
@ -380,22 +386,16 @@ func (entry *Entry) Warningln(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(PanicLevel, args...)
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
|
|
6
vendor/github.com/Sirupsen/logrus/exported.go
generated
vendored
|
@ -1,6 +1,7 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
@ -55,6 +56,11 @@ func WithError(err error) *Entry {
|
|||
return std.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// WithContext creates an entry from the standard logger and adds a context to it.
|
||||
func WithContext(ctx context.Context) *Entry {
|
||||
return std.WithContext(ctx)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
|
|
22
vendor/github.com/Sirupsen/logrus/json_formatter.go
generated
vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type fieldKey string
|
||||
|
@ -42,6 +43,12 @@ type JSONFormatter struct {
|
|||
// }
|
||||
FieldMap FieldMap
|
||||
|
||||
// CallerPrettyfier can be set by the user to modify the content
|
||||
// of the function and file keys in the json data when ReportCaller is
|
||||
// activated. If any of the returned value is the empty string the
|
||||
// corresponding key will be removed from json fields.
|
||||
CallerPrettyfier func(*runtime.Frame) (function string, file string)
|
||||
|
||||
// PrettyPrint will indent all json logs
|
||||
PrettyPrint bool
|
||||
}
|
||||
|
@ -82,8 +89,17 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
|
||||
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
|
||||
if entry.HasCaller() {
|
||||
data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
|
||||
data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
funcVal := entry.Caller.Function
|
||||
fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
if f.CallerPrettyfier != nil {
|
||||
funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
|
||||
}
|
||||
if funcVal != "" {
|
||||
data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
|
||||
}
|
||||
if fileVal != "" {
|
||||
data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
|
||||
}
|
||||
}
|
||||
|
||||
var b *bytes.Buffer
|
||||
|
@ -98,7 +114,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
encoder.SetIndent("", " ")
|
||||
}
|
||||
if err := encoder.Encode(data); err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
|
||||
return b.Bytes(), nil
|
||||
|
|
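The CallerPrettyfier and PrettyPrint options added above can be wired up roughly as follows; SetReportCaller and SetFormatter are existing logrus helpers not shown in this diff, and the trimming logic is purely illustrative:

```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetReportCaller(true)
	log.SetFormatter(&log.JSONFormatter{
		PrettyPrint: true,
		// Shorten the reported caller to "func" and "file.go:line";
		// returning an empty string drops the corresponding key.
		CallerPrettyfier: func(f *runtime.Frame) (function string, file string) {
			return filepath.Base(f.Function), fmt.Sprintf("%s:%d", filepath.Base(f.File), f.Line)
		},
	})

	log.Info("caller information is prettified")
}
```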
162
vendor/github.com/Sirupsen/logrus/logger.go
generated
vendored
|
@ -1,6 +1,7 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
@ -124,6 +125,13 @@ func (logger *Logger) WithError(err error) *Entry {
|
|||
return entry.WithError(err)
|
||||
}
|
||||
|
||||
// Add a context to the log entry.
|
||||
func (logger *Logger) WithContext(ctx context.Context) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithContext(ctx)
|
||||
}
|
||||
|
||||
// Overrides the time of the log entry.
|
||||
func (logger *Logger) WithTime(t time.Time) *Entry {
|
||||
entry := logger.newEntry()
|
||||
|
@ -131,28 +139,24 @@ func (logger *Logger) WithTime(t time.Time) *Entry {
|
|||
return entry.WithTime(t)
|
||||
}
|
||||
|
||||
func (logger *Logger) Tracef(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(TraceLevel) {
|
||||
func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(level) {
|
||||
entry := logger.newEntry()
|
||||
entry.Tracef(format, args...)
|
||||
entry.Logf(level, format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Tracef(format string, args ...interface{}) {
|
||||
logger.Logf(TraceLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(DebugLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logf(DebugLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(InfoLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Infof(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logf(InfoLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||
|
@ -162,139 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logf(WarnLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logf(ErrorLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(FatalLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logf(FatalLevel, format, args...)
|
||||
logger.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(PanicLevel) {
|
||||
logger.Logf(PanicLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Log(level Level, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(level) {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicf(format, args...)
|
||||
entry.Log(level, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Trace(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(TraceLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Trace(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debug(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(DebugLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Debug(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Info(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(InfoLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Print(args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
entry.Print(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warn(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warning(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Warn(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Error(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Error(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatal(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(FatalLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatal(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(FatalLevel, args...)
|
||||
logger.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(PanicLevel) {
|
||||
logger.Log(PanicLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Logln(level Level, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(level) {
|
||||
entry := logger.newEntry()
|
||||
entry.Panic(args...)
|
||||
entry.Logln(level, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Traceln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(TraceLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Traceln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(DebugLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infoln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(InfoLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Infoln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Println(args ...interface{}) {
|
||||
|
@ -304,44 +260,24 @@ func (logger *Logger) Println(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (logger *Logger) Warnln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Warnln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(FatalLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(FatalLevel, args...)
|
||||
logger.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(PanicLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(PanicLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Exit(code int) {
|
||||
|
|
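The refactor above routes every level-specific helper through the new generic Log/Logf/Logln methods, which also makes the level selectable at runtime; a brief sketch (New and SetLevel are existing logrus APIs not shown in this hunk):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetLevel(log.DebugLevel)

	// Choose the level dynamically instead of calling Debugf/Infof/Warnf.
	for _, lvl := range []log.Level{log.DebugLevel, log.InfoLevel, log.WarnLevel} {
		logger.Logf(lvl, "emitted at %s", lvl)
	}

	logger.Log(log.ErrorLevel, "something went wrong")
}
```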
42
vendor/github.com/Sirupsen/logrus/logrus.go
generated
vendored
|
@ -14,24 +14,11 @@ type Level uint32
|
|||
|
||||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||
func (level Level) String() string {
|
||||
switch level {
|
||||
case TraceLevel:
|
||||
return "trace"
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case WarnLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
case PanicLevel:
|
||||
return "panic"
|
||||
if b, err := level.MarshalText(); err == nil {
|
||||
return string(b)
|
||||
} else {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||
|
@ -69,6 +56,27 @@ func (level *Level) UnmarshalText(text []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (level Level) MarshalText() ([]byte, error) {
|
||||
switch level {
|
||||
case TraceLevel:
|
||||
return []byte("trace"), nil
|
||||
case DebugLevel:
|
||||
return []byte("debug"), nil
|
||||
case InfoLevel:
|
||||
return []byte("info"), nil
|
||||
case WarnLevel:
|
||||
return []byte("warning"), nil
|
||||
case ErrorLevel:
|
||||
return []byte("error"), nil
|
||||
case FatalLevel:
|
||||
return []byte("fatal"), nil
|
||||
case PanicLevel:
|
||||
return []byte("panic"), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("not a valid logrus level %d", level)
|
||||
}
|
||||
|
||||
// A constant exposing all logging levels
|
||||
var AllLevels = []Level{
|
||||
PanicLevel,
|
||||
|
|
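With the change above, Level.String is now built on the new MarshalText method, so levels round-trip cleanly through their text form (ParseLevel is the pre-existing inverse); a short sketch:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	// MarshalText renders the canonical lowercase name, e.g. "warning".
	b, err := log.WarnLevel.MarshalText()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// ParseLevel maps the text form back to the constant, e.g. from a config file.
	lvl, err := log.ParseLevel(string(b))
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl == log.WarnLevel) // true
}
```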
9
vendor/github.com/Sirupsen/logrus/terminal_check_aix.go
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
// +build !appengine,!js,!windows,aix
|
||||
|
||||
package logrus
|
||||
|
||||
import "io"
|
||||
|
||||
func checkIfTerminal(w io.Writer) bool {
|
||||
return false
|
||||
}
|
2
vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go
generated
vendored
|
@ -1,4 +1,4 @@
|
|||
// +build !appengine,!js,!windows
|
||||
// +build !appengine,!js,!windows,!aix
|
||||
|
||||
package logrus
|
||||
|
||||
|
|
67
vendor/github.com/Sirupsen/logrus/text_formatter.go
generated
vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -11,18 +12,13 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
nocolor = 0
|
||||
red = 31
|
||||
green = 32
|
||||
yellow = 33
|
||||
blue = 36
|
||||
gray = 37
|
||||
red = 31
|
||||
yellow = 33
|
||||
blue = 36
|
||||
gray = 37
|
||||
)
|
||||
|
||||
var (
|
||||
baseTimestamp time.Time
|
||||
emptyFieldMap FieldMap
|
||||
)
|
||||
var baseTimestamp time.Time
|
||||
|
||||
func init() {
|
||||
baseTimestamp = time.Now()
|
||||
|
@ -76,6 +72,12 @@ type TextFormatter struct {
|
|||
// FieldKeyMsg: "@message"}}
|
||||
FieldMap FieldMap
|
||||
|
||||
// CallerPrettyfier can be set by the user to modify the content
|
||||
// of the function and file keys in the json data when ReportCaller is
|
||||
// activated. If any of the returned value is the empty string the
|
||||
// corresponding key will be removed from json fields.
|
||||
CallerPrettyfier func(*runtime.Frame) (function string, file string)
|
||||
|
||||
terminalInitOnce sync.Once
|
||||
}
|
||||
|
||||
|
@ -90,7 +92,7 @@ func (f *TextFormatter) init(entry *Entry) {
|
|||
}
|
||||
|
||||
func (f *TextFormatter) isColored() bool {
|
||||
isColored := f.ForceColors || f.isTerminal
|
||||
isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
|
||||
|
||||
if f.EnvironmentOverrideColors {
|
||||
if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
|
||||
|
@ -107,14 +109,19 @@ func (f *TextFormatter) isColored() bool {
|
|||
|
||||
// Format renders a single log entry
|
||||
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller())
|
||||
|
||||
keys := make([]string, 0, len(entry.Data))
|
||||
for k := range entry.Data {
|
||||
data := make(Fields)
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
|
||||
keys := make([]string, 0, len(data))
|
||||
for k := range data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
fixedKeys := make([]string, 0, 4+len(entry.Data))
|
||||
var funcVal, fileVal string
|
||||
|
||||
fixedKeys := make([]string, 0, 4+len(data))
|
||||
if !f.DisableTimestamp {
|
||||
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
|
||||
}
|
||||
|
@ -128,6 +135,12 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
if entry.HasCaller() {
|
||||
fixedKeys = append(fixedKeys,
|
||||
f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
|
||||
if f.CallerPrettyfier != nil {
|
||||
funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
|
||||
} else {
|
||||
funcVal = entry.Caller.Function
|
||||
fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
}
|
||||
}
|
||||
|
||||
if !f.DisableSorting {
|
||||
|
@ -160,8 +173,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
timestampFormat = defaultTimestampFormat
|
||||
}
|
||||
if f.isColored() {
|
||||
f.printColored(b, entry, keys, timestampFormat)
|
||||
f.printColored(b, entry, keys, data, timestampFormat)
|
||||
} else {
|
||||
|
||||
for _, key := range fixedKeys {
|
||||
var value interface{}
|
||||
switch {
|
||||
|
@ -174,11 +188,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
case key == f.FieldMap.resolve(FieldKeyLogrusError):
|
||||
value = entry.err
|
||||
case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
|
||||
value = entry.Caller.Function
|
||||
value = funcVal
|
||||
case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
|
||||
value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
value = fileVal
|
||||
default:
|
||||
value = entry.Data[key]
|
||||
value = data[key]
|
||||
}
|
||||
f.appendKeyValue(b, key, value)
|
||||
}
|
||||
|
@ -188,7 +202,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
|
||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
|
||||
var levelColor int
|
||||
switch entry.Level {
|
||||
case DebugLevel, TraceLevel:
|
||||
|
@ -213,8 +227,13 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
|
|||
caller := ""
|
||||
|
||||
if entry.HasCaller() {
|
||||
caller = fmt.Sprintf("%s:%d %s()",
|
||||
entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
|
||||
funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
|
||||
fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
|
||||
if f.CallerPrettyfier != nil {
|
||||
funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
|
||||
}
|
||||
caller = fileVal + " " + funcVal
|
||||
}
|
||||
|
||||
if f.DisableTimestamp {
|
||||
|
@ -225,7 +244,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
|
|||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
|
||||
}
|
||||
for _, k := range keys {
|
||||
v := entry.Data[k]
|
||||
v := data[k]
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
|
||||
f.appendValue(b, v)
|
||||
}
|
||||
|
|
32
vendor/github.com/armon/go-proxyproto/protocol.go
generated
vendored
|
@ -59,7 +59,7 @@ type Conn struct {
|
|||
conn net.Conn
|
||||
dstAddr *net.TCPAddr
|
||||
srcAddr *net.TCPAddr
|
||||
useConnRemoteAddr bool
|
||||
useConnAddr bool
|
||||
once sync.Once
|
||||
proxyHeaderTimeout time.Duration
|
||||
}
|
||||
|
@ -71,18 +71,18 @@ func (p *Listener) Accept() (net.Conn, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var useConnRemoteAddr bool
|
||||
var useConnAddr bool
|
||||
if p.SourceCheck != nil {
|
||||
allowed, err := p.SourceCheck(conn.RemoteAddr())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !allowed {
|
||||
useConnRemoteAddr = true
|
||||
useConnAddr = true
|
||||
}
|
||||
}
|
||||
newConn := NewConn(conn, p.ProxyHeaderTimeout)
|
||||
newConn.useConnRemoteAddr = useConnRemoteAddr
|
||||
newConn.useConnAddr = useConnAddr
|
||||
return newConn, nil
|
||||
}
|
||||
|
||||
|
@ -128,6 +128,10 @@ func (p *Conn) Close() error {
|
|||
}
|
||||
|
||||
func (p *Conn) LocalAddr() net.Addr {
|
||||
p.checkPrefixOnce()
|
||||
if p.dstAddr != nil && !p.useConnAddr {
|
||||
return p.dstAddr
|
||||
}
|
||||
return p.conn.LocalAddr()
|
||||
}
|
||||
|
||||
|
@ -139,14 +143,8 @@ func (p *Conn) LocalAddr() net.Addr {
|
|||
// client is slow. Using a Deadline is recommended if this is called
|
||||
// before Read()
|
||||
func (p *Conn) RemoteAddr() net.Addr {
|
||||
p.once.Do(func() {
|
||||
if err := p.checkPrefix(); err != nil && err != io.EOF {
|
||||
log.Printf("[ERR] Failed to read proxy prefix: %v", err)
|
||||
p.Close()
|
||||
p.bufReader = bufio.NewReader(p.conn)
|
||||
}
|
||||
})
|
||||
if p.srcAddr != nil && !p.useConnRemoteAddr {
|
||||
p.checkPrefixOnce()
|
||||
if p.srcAddr != nil && !p.useConnAddr {
|
||||
return p.srcAddr
|
||||
}
|
||||
return p.conn.RemoteAddr()
|
||||
|
@ -164,6 +162,16 @@ func (p *Conn) SetWriteDeadline(t time.Time) error {
|
|||
return p.conn.SetWriteDeadline(t)
|
||||
}
|
||||
|
||||
func (p *Conn) checkPrefixOnce() {
|
||||
p.once.Do(func() {
|
||||
if err := p.checkPrefix(); err != nil && err != io.EOF {
|
||||
log.Printf("[ERR] Failed to read proxy prefix: %v", err)
|
||||
p.Close()
|
||||
p.bufReader = bufio.NewReader(p.conn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Conn) checkPrefix() error {
|
||||
if p.proxyHeaderTimeout != 0 {
|
||||
readDeadLine := time.Now().Add(p.proxyHeaderTimeout)
|
||||
|
|
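The Accept/SourceCheck path changed above is driven by wrapping a plain net.Listener; a hedged sketch, assuming the upstream Listener struct exposes the Listener and SourceCheck fields exercised by this hunk (the trusted-address check itself is illustrative):

```go
package main

import (
	"log"
	"net"

	proxyproto "github.com/armon/go-proxyproto"
)

func main() {
	inner, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal(err)
	}

	// Only connections from the load balancer may rewrite the remote/local
	// address via a PROXY header; everything else keeps the TCP addresses.
	ln := &proxyproto.Listener{
		Listener: inner,
		SourceCheck: func(addr net.Addr) (bool, error) {
			host, _, err := net.SplitHostPort(addr.String())
			if err != nil {
				return false, err
			}
			return host == "10.0.0.1", nil
		},
	}

	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			// Handle the connection; RemoteAddr() now reflects the PROXY header
			// when the source was trusted.
			_ = c.RemoteAddr()
		}(conn)
	}
}
```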
5
vendor/github.com/census-instrumentation/opencensus-proto/.travis.yml
generated
vendored
|
@ -23,13 +23,15 @@ install:
|
|||
- case "$BUILD" in
|
||||
"BAZEL")
|
||||
sudo apt-get install bazel ;
|
||||
echo "build --disk_cache=$HOME/bazel-cache" > ~/.bazelrc ;
|
||||
echo "build --experimental_strict_action_env" >> ~/.bazelrc ;
|
||||
bazel version;;
|
||||
esac
|
||||
|
||||
script:
|
||||
- case "$BUILD" in
|
||||
"BAZEL")
|
||||
cd src;
|
||||
cd src ;
|
||||
bazel build --show_result=100 ... ;;
|
||||
"GRADLE")
|
||||
./gradlew clean assemble --stacktrace ;
|
||||
|
@ -47,3 +49,4 @@ cache:
|
|||
- $HOME/.gradle
|
||||
- $HOME/.gradle/caches/
|
||||
- $HOME/.gradle/wrapper/
|
||||
- $HOME/bazel-cache/
|
||||
|
|
15
vendor/github.com/census-instrumentation/opencensus-proto/README.md
generated
vendored
|
@ -1,7 +1,10 @@
|
|||
OpenCensus Proto - Language Independent Interface Types For OpenCensus
|
||||
===============================================================
|
||||
[![Gitter chat][gitter-image]][gitter-url]
|
||||
[![Build Status][travis-image]][travis-url]
|
||||
[![Maven Central][maven-image]][maven-url]
|
||||
[![Javadocs][javadoc-image]][javadoc-url]
|
||||
[![GoDoc][godoc-image]][godoc-url]
|
||||
|
||||
Census provides a framework to define and collect stats against metrics and to
|
||||
break those stats down across user-defined dimensions.
|
||||
|
@ -31,19 +34,25 @@ For Maven add to `pom.xml`:
|
|||
<dependency>
|
||||
<groupId>io.opencensus</groupId>
|
||||
<artifactId>opencensus-proto</artifactId>
|
||||
<version>0.0.2</version>
|
||||
<version>0.1.0</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
For Gradle add to dependencies:
|
||||
```gradle
|
||||
compile 'io.opencensus:opencensus-proto:0.0.2'
|
||||
compile 'io.opencensus:opencensus-proto:0.1.0'
|
||||
```
|
||||
|
||||
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-proto.svg?branch=master
|
||||
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-proto
|
||||
[maven-image]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto/badge.svg
|
||||
[maven-url]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto
|
||||
[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
|
||||
[gitter-url]: https://gitter.im/census-instrumentation/lobby
|
||||
[javadoc-image]: https://www.javadoc.io/badge/io.opencensus/opencensus-proto.svg
|
||||
[javadoc-url]: https://www.javadoc.io/doc/io.opencensus/opencensus-proto
|
||||
[godoc-image]: https://godoc.org/github.com/census-instrumentation/opencensus-proto?status.svg
|
||||
[godoc-url]: https://godoc.org/github.com/census-instrumentation/opencensus-proto
|
||||
|
||||
### Add the dependencies to Bazel project
|
||||
|
||||
|
@ -52,7 +61,7 @@ In WORKSPACE, add:
|
|||
git_repository(
|
||||
name = "io_opencensus_proto",
|
||||
strip_prefix = "src",
|
||||
tag = "v0.0.2", # CURRENT_OPENCENSUS_PROTO_VERSION
|
||||
tag = "v0.1.0", # CURRENT_OPENCENSUS_PROTO_VERSION
|
||||
remote = "https://github.com/census-instrumentation/opencensus-proto",
|
||||
)
|
||||
```
|
||||
|
|
6
vendor/github.com/census-instrumentation/opencensus-proto/RELEASING.md
generated
vendored
|
@ -10,9 +10,9 @@ Other systems may also work, but we haven't verified them.
|
|||
|
||||
## Release Go files
|
||||
|
||||
To generate the Go files from protos, you'll need to install protoc and protoc-gen-go plugin first.
|
||||
Follow the instructions [here](http://google.github.io/proto-lens/installing-protoc.html) and
|
||||
[here](https://github.com/golang/protobuf#installation).
|
||||
To generate the Go files from protos, you'll need to install protoc, protoc-gen-go and grpc-gateway plugins first.
|
||||
Follow the instructions [here](http://google.github.io/proto-lens/installing-protoc.html),
|
||||
[here](https://github.com/golang/protobuf#installation) and [here](https://github.com/grpc-ecosystem/grpc-gateway#installation).
|
||||
|
||||
Then run the following commands to re-generate the gen-go files:
|
||||
|
||||
|
|
3
vendor/github.com/census-instrumentation/opencensus-proto/build.gradle
generated
vendored
|
@ -7,7 +7,7 @@ apply plugin: 'maven'
|
|||
apply plugin: "signing"
|
||||
|
||||
group = "io.opencensus"
|
||||
version = "0.1.0" // CURRENT_OPENCENSUS_PROTO_VERSION
|
||||
version = "0.3.0" // CURRENT_OPENCENSUS_PROTO_VERSION
|
||||
|
||||
sourceCompatibility = 1.6
|
||||
targetCompatibility = 1.6
|
||||
|
@ -89,6 +89,7 @@ clean {
|
|||
idea {
|
||||
module {
|
||||
sourceDirs += file("${protobuf.generatedFilesBaseDir}/main/java");
|
||||
sourceDirs += file("${protobuf.generatedFilesBaseDir}/main/grpc");
|
||||
// If you have additional sourceSets and/or codegen plugins, add all of them
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type LibraryInfo_Language int32
|
||||
|
||||
|
@ -70,8 +70,8 @@ func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
|
|||
return fileDescriptor_126c72ed8a252c84, []int{2, 0}
|
||||
}
|
||||
|
||||
// Identifier metadata of the Node (Application instrumented with OpenCensus)
|
||||
// that connects to OpenCensus Agent.
|
||||
// Identifier metadata of the Node that produces the span or tracing data.
|
||||
// Note, this is not the metadata about the Node or service that is described by associated spans.
|
||||
// In the future we plan to extend the identifier proto definition to support
|
||||
// additional information (e.g cloud id, etc.)
|
||||
type Node struct {
|
||||
|
|
264
vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,264 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||
v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type ExportMetricsServiceRequest struct {
|
||||
// This is required only in the first message on the stream or if the
|
||||
// previous sent ExportMetricsServiceRequest message has a different Node (e.g.
|
||||
// when the same RPC is used to send Metrics from multiple Applications).
|
||||
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
||||
// A list of metrics that belong to the last received Node.
|
||||
Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
|
||||
// The resource for the metrics in this message that do not have an explicit
|
||||
// resource set.
|
||||
// If unset, the most recently set resource in the RPC stream applies. It is
|
||||
// valid to never be set within a stream, e.g. when no resource info is known
|
||||
// at all or when all sent metrics have an explicit resource set.
|
||||
Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
|
||||
func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExportMetricsServiceRequest) ProtoMessage() {}
|
||||
func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_47e253a956287d04, []int{0}
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ExportMetricsServiceRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ExportMetricsServiceRequest.Size(m)
|
||||
}
|
||||
func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ExportMetricsServiceRequest) GetNode() *v1.Node {
|
||||
if m != nil {
|
||||
return m.Node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric {
|
||||
if m != nil {
|
||||
return m.Metrics
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource {
|
||||
if m != nil {
|
||||
return m.Resource
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExportMetricsServiceResponse struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
|
||||
func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExportMetricsServiceResponse) ProtoMessage() {}
|
||||
func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_47e253a956287d04, []int{1}
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ExportMetricsServiceResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ExportMetricsServiceResponse.Size(m)
|
||||
}
|
||||
func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest")
|
||||
proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04)
|
||||
}
|
||||
|
||||
var fileDescriptor_47e253a956287d04 = []byte{
|
||||
// 340 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40,
|
||||
0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45,
|
||||
0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f,
|
||||
0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24,
|
||||
0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26,
|
||||
0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5,
|
||||
0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09,
|
||||
0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54,
|
||||
0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1,
|
||||
0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4,
|
||||
0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7,
|
||||
0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c,
|
||||
0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58,
|
||||
0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6,
|
||||
0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b,
|
||||
0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb,
|
||||
0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90,
|
||||
0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a,
|
||||
0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab,
|
||||
0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa,
|
||||
0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3,
|
||||
0x1b, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// MetricsServiceClient is the client API for MetricsService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type MetricsServiceClient interface {
|
||||
// For performance reasons, it is recommended to keep this RPC
|
||||
// alive for the entire life of the application.
|
||||
Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
|
||||
}
|
||||
|
||||
type metricsServiceClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
|
||||
return &metricsServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &metricsServiceExportClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type MetricsService_ExportClient interface {
|
||||
Send(*ExportMetricsServiceRequest) error
|
||||
Recv() (*ExportMetricsServiceResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type metricsServiceExportClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
|
||||
m := new(ExportMetricsServiceResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// MetricsServiceServer is the server API for MetricsService service.
|
||||
type MetricsServiceServer interface {
|
||||
// For performance reasons, it is recommended to keep this RPC
|
||||
// alive for the entire life of the application.
|
||||
Export(MetricsService_ExportServer) error
|
||||
}
|
||||
|
||||
func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
|
||||
s.RegisterService(&_MetricsService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
|
||||
}
|
||||
|
||||
type MetricsService_ExportServer interface {
|
||||
Send(*ExportMetricsServiceResponse) error
|
||||
Recv() (*ExportMetricsServiceRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type metricsServiceExportServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
|
||||
m := new(ExportMetricsServiceRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var _MetricsService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
|
||||
HandlerType: (*MetricsServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Export",
|
||||
Handler: _MetricsService_Export_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
|
||||
}
|
|
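For context on the new metrics_service.pb.go above: the generated client exposes a single bidirectional-streaming Export RPC. The following is only a minimal usage sketch, not part of this PR; the agent address localhost:55678 and the empty one-off payload are illustrative assumptions.

package main

import (
	"context"
	"log"

	agentcommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
	agentmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
	"google.golang.org/grpc"
)

func main() {
	// Assumed OpenCensus agent endpoint; adjust for the actual deployment.
	conn, err := grpc.Dial("localhost:55678", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial agent: %v", err)
	}
	defer conn.Close()

	client := agentmetrics.NewMetricsServiceClient(conn)

	// Export is a long-lived bidirectional stream; the generated comments
	// recommend keeping it open for the lifetime of the application.
	stream, err := client.Export(context.Background())
	if err != nil {
		log.Fatalf("open export stream: %v", err)
	}

	// The first message on the stream must carry the Node identification.
	req := &agentmetrics.ExportMetricsServiceRequest{
		Node: &agentcommon.Node{},
		// Metrics would normally be filled in from the application's
		// metric producers; left empty in this sketch.
	}
	if err := stream.Send(req); err != nil {
		log.Fatalf("send metrics: %v", err)
	}
}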
@ -4,12 +4,12 @@
package v1

import (
	context "context"
	fmt "fmt"
	v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
	v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
	v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
	proto "github.com/golang/protobuf/proto"
	context "golang.org/x/net/context"
	grpc "google.golang.org/grpc"
	math "math"
)

@ -23,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

type CurrentLibraryConfig struct {
	// This is required only in the first message on the stream or if the
154 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go (generated, vendored, new file)
|
@ -0,0 +1,154 @@
|
|||
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
|
||||
// source: opencensus/proto/agent/trace/v1/trace_service.proto
|
||||
|
||||
/*
|
||||
Package v1 is a reverse proxy.
|
||||
|
||||
It translates gRPC into RESTful JSON APIs.
|
||||
*/
|
||||
package v1
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var _ codes.Code
|
||||
var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
|
||||
func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) {
|
||||
var metadata runtime.ServerMetadata
|
||||
stream, err := client.Export(ctx)
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to start streaming: %v", err)
|
||||
return nil, metadata, err
|
||||
}
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, berr
|
||||
}
|
||||
dec := marshaler.NewDecoder(newReader())
|
||||
handleSend := func() error {
|
||||
var protoReq ExportTraceServiceRequest
|
||||
err := dec.Decode(&protoReq)
|
||||
if err == io.EOF {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to decode request: %v", err)
|
||||
return err
|
||||
}
|
||||
if err := stream.Send(&protoReq); err != nil {
|
||||
grpclog.Infof("Failed to send request: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := handleSend(); err != nil {
|
||||
if cerr := stream.CloseSend(); cerr != nil {
|
||||
grpclog.Infof("Failed to terminate client stream: %v", cerr)
|
||||
}
|
||||
if err == io.EOF {
|
||||
return stream, metadata, nil
|
||||
}
|
||||
return nil, metadata, err
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
if err := handleSend(); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := stream.CloseSend(); err != nil {
|
||||
grpclog.Infof("Failed to terminate client stream: %v", err)
|
||||
}
|
||||
}()
|
||||
header, err := stream.Header()
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to get header from client: %v", err)
|
||||
return nil, metadata, err
|
||||
}
|
||||
metadata.HeaderMD = header
|
||||
return stream, metadata, nil
|
||||
}
|
||||
|
||||
// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterTraceServiceHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
|
||||
}
|
||||
|
||||
// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "TraceServiceClient" to call the correct interceptors.
|
||||
func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
|
||||
|
||||
mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_TraceService_Export_0 = runtime.ForwardResponseStream
|
||||
)
|
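The new trace_service.pb.gw.go above is a grpc-gateway reverse proxy that translates RESTful JSON calls into gRPC. As a hedged sketch only (the HTTP listen address :8080 and agent endpoint localhost:55678 are assumptions, not taken from this PR), it could be wired into an HTTP server like this:

package main

import (
	"context"
	"log"
	"net/http"

	tracesvc "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Translate REST calls on :8080 into gRPC calls against the agent.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := tracesvc.RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:55678", opts); err != nil {
		log.Fatalf("register trace service handler: %v", err)
	}

	log.Fatal(http.ListenAndServe(":8080", mux))
}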
1126 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go (generated, vendored, new file)
File diff suppressed because it is too large
@ -18,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// Resource information.
type Resource struct {
457 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go (generated, vendored)
@ -5,6 +5,7 @@ package v1
|
|||
|
||||
import (
|
||||
fmt "fmt"
|
||||
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
wrappers "github.com/golang/protobuf/ptypes/wrappers"
|
||||
|
@ -20,7 +21,7 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// Type of span. Can be used to specify additional relationships between spans
|
||||
// in addition to a parent/child relationship.
|
||||
|
@ -124,22 +125,31 @@ func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {
|
|||
}
|
||||
|
||||
// A span represents a single operation within a trace. Spans can be
|
||||
// nested to form a trace tree. Often, a trace contains a root span
|
||||
// that describes the end-to-end latency, and one or more subspans for
|
||||
// its sub-operations. A trace can also contain multiple root spans,
|
||||
// or none at all. Spans do not need to be contiguous - there may be
|
||||
// gaps or overlaps between spans in a trace.
|
||||
// nested to form a trace tree. Spans may also be linked to other spans
|
||||
// from the same or different trace. And form graphs. Often, a trace
|
||||
// contains a root span that describes the end-to-end latency, and one
|
||||
// or more subspans for its sub-operations. A trace can also contain
|
||||
// multiple root spans, or none at all. Spans do not need to be
|
||||
// contiguous - there may be gaps or overlaps between spans in a trace.
|
||||
//
|
||||
// The next id is 16.
|
||||
// The next id is 17.
|
||||
// TODO(bdrutu): Add an example.
|
||||
type Span struct {
|
||||
// A unique identifier for a trace. All spans from the same trace share
|
||||
// the same `trace_id`. The ID is a 16-byte array.
|
||||
// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes
|
||||
// is considered invalid.
|
||||
//
|
||||
// This field is semantically required. Receiver should generate new
|
||||
// random trace_id if empty or invalid trace_id was received.
|
||||
//
|
||||
// This field is required.
|
||||
TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
|
||||
// A unique identifier for a span within a trace, assigned when the span
|
||||
// is created. The ID is an 8-byte array.
|
||||
// is created. The ID is an 8-byte array. An ID with all zeroes is considered
|
||||
// invalid.
|
||||
//
|
||||
// This field is semantically required. Receiver should generate new
|
||||
// random span_id if empty or invalid span_id was received.
|
||||
//
|
||||
// This field is required.
|
||||
SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
|
||||
|
@ -155,19 +165,37 @@ type Span struct {
|
|||
// the same display name at the same call point in an application.
|
||||
// This makes it easier to correlate spans in different traces.
|
||||
//
|
||||
// This field is semantically required to be set to non-empty string.
|
||||
// When null or empty string received - receiver may use string "name"
|
||||
// as a replacement. There might be smarted algorithms implemented by
|
||||
// receiver to fix the empty span name.
|
||||
//
|
||||
// This field is required.
|
||||
Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Distinguishes between spans generated in a particular context. For example,
|
||||
// two spans with the same name may be distinguished using `CLIENT`
|
||||
// and `SERVER` to identify queueing latency associated with the span.
|
||||
// two spans with the same name may be distinguished using `CLIENT` (caller)
|
||||
// and `SERVER` (callee) to identify queueing latency associated with the span.
|
||||
Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
|
||||
// The start time of the span. On the client side, this is the time kept by
|
||||
// the local machine where the span execution starts. On the server side, this
|
||||
// is the time when the server's application handler starts running.
|
||||
//
|
||||
// This field is semantically required. When not set on receive -
|
||||
// receiver should set it to the value of end_time field if it was
|
||||
// set. Or to the current time if neither was set. It is important to
|
||||
// keep end_time > start_time for consistency.
|
||||
//
|
||||
// This field is required.
|
||||
StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||
// The end time of the span. On the client side, this is the time kept by
|
||||
// the local machine where the span execution ends. On the server side, this
|
||||
// is the time when the server application handler stops running.
|
||||
//
|
||||
// This field is semantically required. When not set on receive -
|
||||
// receiver should set it to start_time value. It is important to
|
||||
// keep end_time > start_time for consistency.
|
||||
//
|
||||
// This field is required.
|
||||
EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
|
||||
// A set of attributes on the span.
|
||||
Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
|
||||
|
@ -177,11 +205,19 @@ type Span struct {
|
|||
TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
|
||||
// The included links.
|
||||
Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
|
||||
// An optional final status for this span.
|
||||
// An optional final status for this span. Semantically when Status
|
||||
// wasn't set it is means span ended without errors and assume
|
||||
// Status.Ok (code = 0).
|
||||
Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
|
||||
// A highly recommended but not required flag that identifies when a trace
|
||||
// crosses a process boundary. True when the parent_span belongs to the
|
||||
// same process as the current span.
|
||||
// An optional resource that is associated with this span. If not set, this span
|
||||
// should be part of a batch that does include the resource information, unless resource
|
||||
// information is unknown.
|
||||
Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"`
|
||||
// A highly recommended but not required flag that identifies when a
|
||||
// trace crosses a process boundary. True when the parent_span belongs
|
||||
// to the same process as the current span. This flag is most commonly
|
||||
// used to indicate the need to adjust time as clocks in different
|
||||
// processes may not be synchronized.
|
||||
SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
|
||||
// An optional number of child spans that were generated while this span
|
||||
// was active. If set, allows an implementation to detect missing child spans.
|
||||
|
@ -307,6 +343,13 @@ func (m *Span) GetStatus() *Status {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *Span) GetResource() *v1.Resource {
|
||||
if m != nil {
|
||||
return m.Resource
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue {
|
||||
if m != nil {
|
||||
return m.SameProcessAsParentSpan
|
||||
|
@ -420,13 +463,14 @@ func (m *Span_Tracestate_Entry) GetValue() string {
|
|||
|
||||
// A set of attributes, each with a key and a value.
|
||||
type Span_Attributes struct {
|
||||
// The set of attributes. The value can be a string, an integer, or the
|
||||
// Boolean values `true` and `false`. For example:
|
||||
// The set of attributes. The value can be a string, an integer, a double
|
||||
// or the Boolean values `true` or `false`. Note, global attributes like
|
||||
// server name can be set as tags using resource API. Examples of attributes:
|
||||
//
|
||||
// "/instance_id": "my-instance"
|
||||
// "/http/user_agent": ""
|
||||
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
|
||||
// "/http/server_latency": 300
|
||||
// "abc.com/myattribute": true
|
||||
// "abc.com/score": 10.239
|
||||
AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// The number of attributes that were discarded. Attributes can be discarded
|
||||
// because their keys are too long or because there are too many attributes.
|
||||
|
@ -561,80 +605,14 @@ func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent {
|
|||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*Span_TimeEvent_Annotation_)(nil),
|
||||
(*Span_TimeEvent_MessageEvent_)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*Span_TimeEvent)
|
||||
// value
|
||||
switch x := m.Value.(type) {
|
||||
case *Span_TimeEvent_Annotation_:
|
||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.Annotation); err != nil {
|
||||
return err
|
||||
}
|
||||
case *Span_TimeEvent_MessageEvent_:
|
||||
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.MessageEvent); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*Span_TimeEvent)
|
||||
switch tag {
|
||||
case 2: // value.annotation
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(Span_TimeEvent_Annotation)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Value = &Span_TimeEvent_Annotation_{msg}
|
||||
return true, err
|
||||
case 3: // value.message_event
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(Span_TimeEvent_MessageEvent)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Value = &Span_TimeEvent_MessageEvent_{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*Span_TimeEvent)
|
||||
// value
|
||||
switch x := m.Value.(type) {
|
||||
case *Span_TimeEvent_Annotation_:
|
||||
s := proto.Size(x.Annotation)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *Span_TimeEvent_MessageEvent_:
|
||||
s := proto.Size(x.MessageEvent)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// A text annotation with a set of attributes.
|
||||
type Span_TimeEvent_Annotation struct {
|
||||
// A user-supplied message describing the event.
|
||||
|
@ -826,11 +804,10 @@ func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
|
|||
// where a single batch handler processes multiple requests from different
|
||||
// traces or when the handler receives a request from a different project.
|
||||
type Span_Link struct {
|
||||
// A unique identifier for a trace. All spans from the same trace share
|
||||
// the same `trace_id`. The ID is a 16-byte array.
|
||||
// A unique identifier of a trace that this linked span is part of. The ID is a
|
||||
// 16-byte array.
|
||||
TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
|
||||
// A unique identifier for a span within a trace, assigned when the span
|
||||
// is created. The ID is an 8-byte array.
|
||||
// A unique identifier for the linked span. The ID is an 8-byte array.
|
||||
SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
|
||||
// The relationship of the current span relative to the linked span.
|
||||
Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"`
|
||||
|
@ -952,7 +929,8 @@ func (m *Span_Links) GetDroppedLinksCount() int32 {
|
|||
// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto),
|
||||
// which is used by [gRPC](https://github.com/grpc).
|
||||
type Status struct {
|
||||
// The status code.
|
||||
// The status code. This is optional field. It is safe to assume 0 (OK)
|
||||
// when not set.
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
// A developer-facing error message, which should be in English.
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
|
@ -1103,9 +1081,9 @@ func (m *AttributeValue) GetDoubleValue() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*AttributeValue) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*AttributeValue_StringValue)(nil),
|
||||
(*AttributeValue_IntValue)(nil),
|
||||
(*AttributeValue_BoolValue)(nil),
|
||||
|
@ -1113,97 +1091,6 @@ func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer
|
|||
}
|
||||
}
|
||||
|
||||
func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*AttributeValue)
|
||||
// value
|
||||
switch x := m.Value.(type) {
|
||||
case *AttributeValue_StringValue:
|
||||
b.EncodeVarint(1<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.StringValue); err != nil {
|
||||
return err
|
||||
}
|
||||
case *AttributeValue_IntValue:
|
||||
b.EncodeVarint(2<<3 | proto.WireVarint)
|
||||
b.EncodeVarint(uint64(x.IntValue))
|
||||
case *AttributeValue_BoolValue:
|
||||
t := uint64(0)
|
||||
if x.BoolValue {
|
||||
t = 1
|
||||
}
|
||||
b.EncodeVarint(3<<3 | proto.WireVarint)
|
||||
b.EncodeVarint(t)
|
||||
case *AttributeValue_DoubleValue:
|
||||
b.EncodeVarint(4<<3 | proto.WireFixed64)
|
||||
b.EncodeFixed64(math.Float64bits(x.DoubleValue))
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("AttributeValue.Value has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*AttributeValue)
|
||||
switch tag {
|
||||
case 1: // value.string_value
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(TruncatableString)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Value = &AttributeValue_StringValue{msg}
|
||||
return true, err
|
||||
case 2: // value.int_value
|
||||
if wire != proto.WireVarint {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeVarint()
|
||||
m.Value = &AttributeValue_IntValue{int64(x)}
|
||||
return true, err
|
||||
case 3: // value.bool_value
|
||||
if wire != proto.WireVarint {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeVarint()
|
||||
m.Value = &AttributeValue_BoolValue{x != 0}
|
||||
return true, err
|
||||
case 4: // value.double_value
|
||||
if wire != proto.WireFixed64 {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeFixed64()
|
||||
m.Value = &AttributeValue_DoubleValue{math.Float64frombits(x)}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _AttributeValue_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*AttributeValue)
|
||||
// value
|
||||
switch x := m.Value.(type) {
|
||||
case *AttributeValue_StringValue:
|
||||
s := proto.Size(x.StringValue)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *AttributeValue_IntValue:
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(x.IntValue))
|
||||
case *AttributeValue_BoolValue:
|
||||
n += 1 // tag and wire
|
||||
n += 1
|
||||
case *AttributeValue_DoubleValue:
|
||||
n += 1 // tag and wire
|
||||
n += 8
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// The call stack which originated this span.
|
||||
type StackTrace struct {
|
||||
// Stack frames in this stack trace.
|
||||
|
@ -1554,101 +1441,103 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_8ea38bbb821bf584 = []byte{
|
||||
// 1524 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5b, 0x6f, 0x1b, 0xb7,
|
||||
0x12, 0xf6, 0xea, 0xae, 0x91, 0xac, 0xc8, 0x8c, 0x93, 0xc8, 0x3a, 0x39, 0x27, 0x3e, 0x4a, 0x82,
|
||||
0xe3, 0x9c, 0xd6, 0x72, 0xe2, 0xa4, 0x41, 0xae, 0x48, 0x7d, 0x91, 0x2b, 0xc5, 0x8e, 0xaa, 0x50,
|
||||
0x8a, 0xd1, 0x0b, 0x8a, 0xc5, 0x4a, 0x4b, 0xcb, 0x5b, 0x4b, 0xdc, 0xed, 0x92, 0xeb, 0xc2, 0x79,
|
||||
0xeb, 0x53, 0x51, 0xf4, 0xad, 0x40, 0xd1, 0x3f, 0xd0, 0x87, 0xfe, 0x9d, 0xa2, 0xbf, 0xa3, 0xbf,
|
||||
0xa0, 0x2f, 0x05, 0xc9, 0xbd, 0xc9, 0x49, 0x6c, 0x55, 0x79, 0x31, 0xb8, 0xe4, 0x7c, 0x1f, 0x39,
|
||||
0x9c, 0x6f, 0x86, 0x63, 0xc1, 0x4d, 0xdb, 0x21, 0x74, 0x40, 0x28, 0xf3, 0xd8, 0x9a, 0xe3, 0xda,
|
||||
0xdc, 0x5e, 0xe3, 0xae, 0x31, 0x20, 0x6b, 0xc7, 0x77, 0xd4, 0xa0, 0x2e, 0x27, 0xd1, 0x52, 0x64,
|
||||
0xa6, 0x66, 0xea, 0x6a, 0xf5, 0xf8, 0x4e, 0xf5, 0xda, 0xd0, 0xb6, 0x87, 0x23, 0xa2, 0xd0, 0x7d,
|
||||
0xef, 0x60, 0x8d, 0x5b, 0x63, 0xc2, 0xb8, 0x31, 0x76, 0x94, 0x65, 0xf5, 0x3f, 0xa7, 0x0d, 0xbe,
|
||||
0x75, 0x0d, 0xc7, 0x21, 0xae, 0xcf, 0x54, 0xfb, 0xee, 0x12, 0xa4, 0xba, 0x8e, 0x41, 0xd1, 0x12,
|
||||
0xe4, 0x24, 0xab, 0x6e, 0x99, 0x15, 0x6d, 0x59, 0x5b, 0x29, 0xe2, 0xac, 0xfc, 0x6e, 0x99, 0xe8,
|
||||
0x0a, 0x64, 0x99, 0x63, 0x50, 0xb1, 0x92, 0x90, 0x2b, 0x19, 0xf1, 0xd9, 0x32, 0xd1, 0x73, 0x00,
|
||||
0x69, 0xc3, 0xb8, 0xc1, 0x49, 0xe5, 0xc2, 0xb2, 0xb6, 0x52, 0x58, 0xff, 0x7f, 0xfd, 0x9d, 0xa7,
|
||||
0xad, 0x8b, 0x8d, 0xea, 0xbd, 0x10, 0x81, 0x63, 0x68, 0x74, 0x03, 0x4a, 0x8e, 0xe1, 0x12, 0xca,
|
||||
0xf5, 0x60, 0xaf, 0xa4, 0xdc, 0xab, 0xa8, 0x66, 0xbb, 0x6a, 0xc7, 0x8f, 0x21, 0x45, 0x8d, 0x31,
|
||||
0xa9, 0xa4, 0xe4, 0x5e, 0x1f, 0x9e, 0xb1, 0x57, 0xcf, 0xf5, 0xe8, 0xc0, 0xe0, 0x46, 0x7f, 0x44,
|
||||
0xba, 0xdc, 0xb5, 0xe8, 0x10, 0x4b, 0x24, 0x7a, 0x02, 0xa9, 0x23, 0x8b, 0x9a, 0x95, 0xd2, 0xb2,
|
||||
0xb6, 0x52, 0x5a, 0x5f, 0x39, 0xef, 0xb4, 0xe2, 0xcf, 0xae, 0x45, 0x4d, 0x2c, 0x51, 0xe8, 0x21,
|
||||
0x00, 0xe3, 0x86, 0xcb, 0x75, 0x71, 0xcf, 0x95, 0xb4, 0x3c, 0x45, 0xb5, 0xae, 0xee, 0xb8, 0x1e,
|
||||
0xdc, 0x71, 0xbd, 0x17, 0x04, 0x01, 0xe7, 0xa5, 0xb5, 0xf8, 0x46, 0x1f, 0x41, 0x8e, 0x50, 0x53,
|
||||
0x01, 0x33, 0xe7, 0x02, 0xb3, 0x84, 0x9a, 0x12, 0xf6, 0x1c, 0xc0, 0xe0, 0xdc, 0xb5, 0xfa, 0x1e,
|
||||
0x27, 0xac, 0x92, 0x9d, 0xee, 0x8e, 0x37, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x0e, 0x14, 0x18, 0x37,
|
||||
0x06, 0x47, 0xba, 0xb4, 0xae, 0xe4, 0x24, 0xd9, 0xcd, 0xb3, 0xc8, 0x84, 0xb5, 0x0c, 0x18, 0x06,
|
||||
0x16, 0x8e, 0xd1, 0x2e, 0x14, 0x84, 0x1b, 0x3a, 0x39, 0x26, 0x94, 0xb3, 0x4a, 0x7e, 0xca, 0xc0,
|
||||
0x5b, 0x63, 0xd2, 0x90, 0x08, 0x0c, 0x3c, 0x1c, 0xa3, 0xc7, 0x90, 0x1e, 0x59, 0xf4, 0x88, 0x55,
|
||||
0xe0, 0xfc, 0xe3, 0x08, 0x9a, 0x3d, 0x61, 0x8c, 0x15, 0x06, 0x3d, 0x84, 0x8c, 0x90, 0x8f, 0xc7,
|
||||
0x2a, 0x05, 0x89, 0xfe, 0xef, 0xd9, 0xce, 0x70, 0x8f, 0x61, 0x1f, 0x80, 0x3e, 0x83, 0x7f, 0x31,
|
||||
0x63, 0x4c, 0x74, 0xc7, 0xb5, 0x07, 0x84, 0x31, 0xdd, 0x60, 0x7a, 0x4c, 0x80, 0x95, 0xe2, 0x3b,
|
||||
0x42, 0xb4, 0x69, 0xdb, 0xa3, 0x7d, 0x63, 0xe4, 0x11, 0x7c, 0x45, 0xc0, 0x3b, 0x0a, 0xbd, 0xc1,
|
||||
0x3a, 0xa1, 0x4c, 0xd1, 0x0e, 0x94, 0x07, 0x87, 0xd6, 0xc8, 0x54, 0x4a, 0x1e, 0xd8, 0x1e, 0xe5,
|
||||
0x95, 0x79, 0x49, 0x77, 0xf5, 0x0d, 0xba, 0x57, 0x2d, 0xca, 0xef, 0xae, 0x2b, 0xc2, 0x92, 0x44,
|
||||
0x09, 0x8a, 0x2d, 0x81, 0xa9, 0xfe, 0xa0, 0x01, 0x44, 0xd9, 0x82, 0x9e, 0x43, 0x96, 0x50, 0xee,
|
||||
0x5a, 0x84, 0x55, 0xb4, 0xe5, 0xe4, 0x4a, 0x61, 0xfd, 0xf6, 0xf4, 0xa9, 0x56, 0x6f, 0x50, 0xee,
|
||||
0x9e, 0xe0, 0x80, 0xa0, 0xba, 0x06, 0x69, 0x39, 0x83, 0xca, 0x90, 0x3c, 0x22, 0x27, 0x32, 0xe3,
|
||||
0xf3, 0x58, 0x0c, 0xd1, 0x22, 0xa4, 0x8f, 0xc5, 0x71, 0x64, 0xae, 0xe7, 0xb1, 0xfa, 0xa8, 0xfe,
|
||||
0x9c, 0x00, 0x88, 0x54, 0x85, 0x0c, 0x98, 0x0f, 0x75, 0xa5, 0x8f, 0x0d, 0xc7, 0x3f, 0xd1, 0x93,
|
||||
0xe9, 0x85, 0x19, 0x0d, 0x5f, 0x18, 0x8e, 0x3a, 0x5d, 0xd1, 0x88, 0x4d, 0xa1, 0x07, 0x50, 0x31,
|
||||
0x5d, 0xdb, 0x71, 0x88, 0xa9, 0x47, 0x12, 0xf6, 0x6f, 0x53, 0x1c, 0x2d, 0x8d, 0x2f, 0xfb, 0xeb,
|
||||
0x11, 0xa9, 0xba, 0xb7, 0xaf, 0x61, 0xe1, 0x0d, 0xf2, 0xb7, 0x38, 0xfa, 0x2c, 0xee, 0x68, 0x61,
|
||||
0xfd, 0xd6, 0x19, 0x67, 0x0f, 0xe9, 0x54, 0xa0, 0x14, 0xee, 0x51, 0xe2, 0x81, 0x56, 0xfd, 0x35,
|
||||
0x0d, 0xf9, 0x50, 0xd8, 0xa8, 0x0e, 0x29, 0x99, 0xdf, 0xda, 0xb9, 0xf9, 0x2d, 0xed, 0xd0, 0x3e,
|
||||
0x80, 0x41, 0xa9, 0xcd, 0x0d, 0x6e, 0xd9, 0xd4, 0x3f, 0xc7, 0xbd, 0xa9, 0xf3, 0xa8, 0xbe, 0x11,
|
||||
0x62, 0x9b, 0x73, 0x38, 0xc6, 0x84, 0xbe, 0x82, 0xf9, 0x31, 0x61, 0xcc, 0x18, 0xfa, 0x39, 0x2a,
|
||||
0x6b, 0x69, 0x61, 0xfd, 0xfe, 0xf4, 0xd4, 0x2f, 0x14, 0x5c, 0x7e, 0x34, 0xe7, 0x70, 0x71, 0x1c,
|
||||
0xfb, 0xae, 0xfe, 0xa6, 0x01, 0x44, 0x7b, 0xa3, 0x36, 0x14, 0x4c, 0xc2, 0x06, 0xae, 0xe5, 0x48,
|
||||
0x37, 0xb4, 0x19, 0x6a, 0x73, 0x9c, 0xe0, 0x54, 0xc9, 0x4b, 0xbc, 0x4f, 0xc9, 0xab, 0xfe, 0xa5,
|
||||
0x41, 0x31, 0xee, 0x0b, 0xfa, 0x14, 0x52, 0xfc, 0xc4, 0x51, 0x21, 0x2a, 0xad, 0x3f, 0x9e, 0xed,
|
||||
0x46, 0xea, 0xbd, 0x13, 0x87, 0x60, 0x49, 0x84, 0x4a, 0x90, 0xf0, 0x1f, 0xc6, 0x14, 0x4e, 0x58,
|
||||
0x26, 0xfa, 0x00, 0x16, 0x3c, 0x3a, 0xb0, 0xc7, 0x8e, 0x4b, 0x18, 0x23, 0xa6, 0xce, 0xac, 0xd7,
|
||||
0x44, 0xde, 0x7f, 0x0a, 0x97, 0xe3, 0x0b, 0x5d, 0xeb, 0x35, 0x41, 0xff, 0x83, 0x0b, 0xa7, 0x4d,
|
||||
0x53, 0xd2, 0xb4, 0x34, 0x69, 0x58, 0xbb, 0x07, 0x29, 0xb1, 0x27, 0x5a, 0x84, 0x72, 0xef, 0xf3,
|
||||
0x4e, 0x43, 0x7f, 0xd5, 0xee, 0x76, 0x1a, 0x5b, 0xad, 0x9d, 0x56, 0x63, 0xbb, 0x3c, 0x87, 0x72,
|
||||
0x90, 0xea, 0x36, 0xda, 0xbd, 0xb2, 0x86, 0x8a, 0x90, 0xc3, 0x8d, 0xad, 0x46, 0x6b, 0xbf, 0xb1,
|
||||
0x5d, 0x4e, 0x6c, 0x66, 0x7d, 0x89, 0x57, 0xff, 0x10, 0xa5, 0x24, 0xaa, 0xb9, 0x4d, 0x80, 0xa8,
|
||||
0x80, 0xfb, 0xb9, 0x7b, 0x6b, 0xea, 0xab, 0xc0, 0xf9, 0xb0, 0x7c, 0xa3, 0x47, 0xb0, 0x14, 0x66,
|
||||
0x69, 0xa8, 0x88, 0xc9, 0x34, 0xbd, 0x12, 0xa4, 0x69, 0xb4, 0x2e, 0xf3, 0x14, 0x3d, 0x83, 0xab,
|
||||
0x01, 0x76, 0x42, 0xad, 0x01, 0x3c, 0x29, 0xe1, 0x01, 0x7f, 0xfc, 0xfe, 0xfd, 0x44, 0xff, 0x29,
|
||||
0x01, 0x29, 0xf1, 0x1c, 0xcc, 0xd4, 0xbc, 0x3c, 0xf5, 0x85, 0x90, 0x94, 0x42, 0xb8, 0x35, 0xcd,
|
||||
0xb3, 0x13, 0x0f, 0xfb, 0xa4, 0x48, 0x53, 0xef, 0x23, 0xd2, 0xda, 0xee, 0x99, 0xc1, 0xbd, 0x04,
|
||||
0x0b, 0x5b, 0xcd, 0xd6, 0xde, 0xb6, 0xbe, 0xd7, 0x6a, 0xef, 0x36, 0xb6, 0xf5, 0x6e, 0x67, 0xa3,
|
||||
0x5d, 0xd6, 0xd0, 0x65, 0x40, 0x9d, 0x0d, 0xdc, 0x68, 0xf7, 0x26, 0xe6, 0x13, 0xd5, 0x6f, 0x20,
|
||||
0x2d, 0x9f, 0x48, 0xf4, 0x00, 0x52, 0xe2, 0x91, 0xf4, 0xc3, 0x7b, 0x63, 0x1a, 0x07, 0xb1, 0x44,
|
||||
0xa0, 0x3a, 0x5c, 0x0c, 0x02, 0x23, 0x9f, 0xd9, 0x89, 0x70, 0x2e, 0xf8, 0x4b, 0x72, 0x13, 0x19,
|
||||
0x87, 0xda, 0x53, 0xc8, 0x05, 0x7d, 0x12, 0x5a, 0x82, 0x4b, 0xe2, 0x20, 0xfa, 0x6e, 0xab, 0xbd,
|
||||
0x7d, 0xca, 0x11, 0x80, 0x4c, 0xb7, 0x81, 0xf7, 0x1b, 0xb8, 0xac, 0x89, 0xf1, 0xd6, 0x5e, 0x4b,
|
||||
0x68, 0x36, 0x51, 0xbb, 0x0f, 0x19, 0xf5, 0x36, 0x23, 0x04, 0xa9, 0x81, 0x6d, 0xaa, 0xe4, 0x4c,
|
||||
0x63, 0x39, 0x46, 0x15, 0xc8, 0xfa, 0xea, 0xf0, 0x5f, 0xa4, 0xe0, 0xb3, 0xf6, 0xbb, 0x06, 0xa5,
|
||||
0xc9, 0xca, 0x8c, 0x5e, 0x42, 0x91, 0xc9, 0x8a, 0xa2, 0xab, 0xd2, 0x3e, 0x43, 0x2d, 0x6a, 0xce,
|
||||
0xe1, 0x82, 0xe2, 0x50, 0x94, 0xff, 0x86, 0xbc, 0x45, 0xb9, 0x1e, 0x3d, 0x15, 0xc9, 0xe6, 0x1c,
|
||||
0xce, 0x59, 0x94, 0xab, 0xe5, 0x6b, 0x00, 0x7d, 0xdb, 0x1e, 0xf9, 0xeb, 0x42, 0x4c, 0xb9, 0xe6,
|
||||
0x1c, 0xce, 0xf7, 0x83, 0x36, 0x01, 0x5d, 0x87, 0xa2, 0x69, 0x7b, 0xfd, 0x11, 0xf1, 0x4d, 0x84,
|
||||
0x54, 0x34, 0xb1, 0x89, 0x9a, 0x95, 0x46, 0x61, 0xa2, 0xd6, 0x7e, 0xcc, 0x00, 0x44, 0x5d, 0x17,
|
||||
0xea, 0x09, 0x7f, 0x44, 0xc7, 0x76, 0xe0, 0x1a, 0x63, 0xf9, 0xf0, 0x0b, 0x7f, 0xee, 0x4c, 0xd5,
|
||||
0xb2, 0xa9, 0xe1, 0x8e, 0x04, 0x62, 0xd5, 0xf8, 0xa9, 0x0f, 0xb4, 0x0a, 0x17, 0x63, 0x7d, 0xa0,
|
||||
0x7e, 0x68, 0xb0, 0x43, 0x3d, 0xac, 0x61, 0xe5, 0xa8, 0xd1, 0x6b, 0x1a, 0xec, 0xb0, 0x65, 0x56,
|
||||
0xff, 0x4c, 0xfa, 0x67, 0x92, 0x70, 0xf4, 0x12, 0xe6, 0x0f, 0x3c, 0x3a, 0x10, 0x89, 0xac, 0xcb,
|
||||
0x66, 0x7c, 0x96, 0x82, 0x5f, 0x0c, 0x28, 0xda, 0x82, 0xb2, 0x0f, 0x97, 0x6d, 0xd7, 0x1a, 0x5a,
|
||||
0xd4, 0x18, 0xe9, 0x93, 0xdc, 0x89, 0x19, 0xb8, 0x17, 0x03, 0xae, 0x9d, 0xf8, 0x1e, 0x2d, 0xc8,
|
||||
0x1f, 0x58, 0x23, 0xa2, 0x68, 0x93, 0x33, 0xd0, 0xe6, 0x04, 0x5c, 0x52, 0x5d, 0x83, 0xc2, 0xc8,
|
||||
0xa2, 0x44, 0xa7, 0xde, 0xb8, 0x4f, 0x5c, 0x19, 0xd1, 0x24, 0x06, 0x31, 0xd5, 0x96, 0x33, 0xe8,
|
||||
0x3a, 0xcc, 0x0f, 0xec, 0x91, 0x37, 0xa6, 0x81, 0x49, 0x5a, 0x9a, 0x14, 0xd5, 0xa4, 0x6f, 0xb4,
|
||||
0x09, 0x85, 0x91, 0x6d, 0x98, 0xfa, 0xd8, 0x36, 0xbd, 0x51, 0xf0, 0x3f, 0xc1, 0x59, 0x0d, 0xec,
|
||||
0x0b, 0x69, 0x88, 0x41, 0xa0, 0xd4, 0x18, 0x75, 0xa1, 0xc4, 0x6c, 0xcf, 0x1d, 0x10, 0xfd, 0x98,
|
||||
0xb8, 0x4c, 0xbc, 0xbe, 0xd9, 0x19, 0x3c, 0x9b, 0x57, 0x1c, 0xfb, 0x8a, 0xa2, 0xfa, 0xbd, 0x06,
|
||||
0x85, 0x98, 0x76, 0xd0, 0x0e, 0xa4, 0xa5, 0xfc, 0xa6, 0x69, 0x3b, 0xdf, 0xa6, 0x3e, 0xac, 0xe0,
|
||||
0xe8, 0x36, 0x2c, 0x06, 0x65, 0x45, 0xc9, 0x79, 0xa2, 0xae, 0x20, 0x7f, 0x4d, 0x6d, 0xaa, 0x0a,
|
||||
0xcb, 0x2f, 0x1a, 0x64, 0x7c, 0x4f, 0xb7, 0x21, 0xe3, 0x5f, 0xd4, 0x2c, 0x72, 0xf3, 0xb1, 0xe8,
|
||||
0x13, 0xc8, 0xf5, 0x3d, 0xd1, 0x9a, 0xfb, 0x72, 0xff, 0xa7, 0x3c, 0x59, 0x89, 0x6e, 0x99, 0xb5,
|
||||
0x2f, 0x61, 0xe1, 0x8d, 0xd5, 0xa8, 0x75, 0xd6, 0x62, 0xad, 0xb3, 0x70, 0x9b, 0x2b, 0x53, 0x62,
|
||||
0xea, 0xfd, 0x13, 0x4e, 0x26, 0xdd, 0x0e, 0xd7, 0x36, 0x4f, 0x38, 0x91, 0x6e, 0x6f, 0x3a, 0x70,
|
||||
0xd5, 0xb2, 0xdf, 0x7d, 0xae, 0x4d, 0xf5, 0x5f, 0x41, 0x47, 0x4c, 0x76, 0xb4, 0x2f, 0x36, 0x87,
|
||||
0x16, 0x3f, 0xf4, 0xfa, 0xf5, 0x81, 0x3d, 0x5e, 0x53, 0xf6, 0xab, 0x16, 0x65, 0xdc, 0xf5, 0xc6,
|
||||
0x84, 0xaa, 0xf7, 0x76, 0x2d, 0xa2, 0x5a, 0x55, 0xbf, 0x33, 0x0c, 0x09, 0x5d, 0x1d, 0x46, 0x3f,
|
||||
0x37, 0xf4, 0x33, 0x72, 0xfa, 0xee, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x5e, 0xa6, 0x6a,
|
||||
0x92, 0x10, 0x00, 0x00,
|
||||
// 1557 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xeb, 0x52, 0x1b, 0x47,
|
||||
0x16, 0x66, 0x74, 0xd7, 0x91, 0x90, 0x45, 0x1b, 0xdb, 0x83, 0xd6, 0xbb, 0x66, 0x65, 0x7b, 0x17,
|
||||
0xaf, 0x17, 0x61, 0xb0, 0xd7, 0xe5, 0x6b, 0x79, 0x11, 0x88, 0x48, 0x06, 0x2b, 0x72, 0x4b, 0xa6,
|
||||
0x72, 0xa9, 0xd4, 0xd4, 0x48, 0xd3, 0x88, 0x09, 0x52, 0xcf, 0x64, 0xa6, 0x87, 0x14, 0x7e, 0x81,
|
||||
0x54, 0x2a, 0xff, 0x52, 0x95, 0xca, 0x0b, 0xe4, 0x47, 0x5e, 0x24, 0x0f, 0x90, 0xca, 0x73, 0xe4,
|
||||
0x09, 0xf2, 0x27, 0xd5, 0xdd, 0x73, 0x13, 0xd8, 0xa0, 0xc8, 0x7f, 0xa8, 0x9e, 0xee, 0xf3, 0x7d,
|
||||
0x7d, 0x4e, 0x9f, 0x2b, 0x82, 0xdb, 0x96, 0x4d, 0xe8, 0x80, 0x50, 0xd7, 0x73, 0xd7, 0x6c, 0xc7,
|
||||
0x62, 0xd6, 0x1a, 0x73, 0xf4, 0x01, 0x59, 0x3b, 0x5e, 0x97, 0x8b, 0x9a, 0xd8, 0x44, 0x4b, 0x91,
|
||||
0x98, 0xdc, 0xa9, 0xc9, 0xd3, 0xe3, 0xf5, 0xca, 0xdd, 0x33, 0x0c, 0x0e, 0x71, 0x2d, 0xcf, 0x91,
|
||||
0x24, 0xc1, 0x5a, 0xa2, 0x2a, 0x37, 0x86, 0x96, 0x35, 0x1c, 0x11, 0x29, 0xd8, 0xf7, 0x0e, 0xd6,
|
||||
0x98, 0x39, 0x26, 0x2e, 0xd3, 0xc7, 0xb6, 0x2f, 0xf0, 0x8f, 0xd3, 0x02, 0x5f, 0x3b, 0xba, 0x6d,
|
||||
0x13, 0xc7, 0xbf, 0xb6, 0xfa, 0xcb, 0x15, 0x48, 0x75, 0x6d, 0x9d, 0xa2, 0x25, 0xc8, 0x09, 0x15,
|
||||
0x34, 0xd3, 0x50, 0x95, 0x65, 0x65, 0xa5, 0x88, 0xb3, 0xe2, 0xbb, 0x65, 0xa0, 0x6b, 0x90, 0x75,
|
||||
0x6d, 0x9d, 0xf2, 0x93, 0x84, 0x38, 0xc9, 0xf0, 0xcf, 0x96, 0x81, 0x5e, 0x02, 0x08, 0x19, 0x97,
|
||||
0xe9, 0x8c, 0xa8, 0x97, 0x96, 0x95, 0x95, 0xc2, 0xc6, 0x7f, 0x6a, 0xef, 0x35, 0xad, 0xc6, 0x2f,
|
||||
0xaa, 0xf5, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x2d, 0x28, 0xd9, 0xba, 0x43, 0x28, 0xd3, 0x82, 0xbb,
|
||||
0x92, 0xe2, 0xae, 0xa2, 0xdc, 0xed, 0xca, 0x1b, 0xff, 0x0f, 0x29, 0xaa, 0x8f, 0x89, 0x9a, 0x12,
|
||||
0x77, 0xfd, 0xf7, 0x9c, 0xbb, 0x7a, 0x8e, 0x47, 0x07, 0x3a, 0xd3, 0xfb, 0x23, 0xd2, 0x65, 0x8e,
|
||||
0x49, 0x87, 0x58, 0x20, 0xd1, 0x33, 0x48, 0x1d, 0x99, 0xd4, 0x50, 0x4b, 0xcb, 0xca, 0x4a, 0x69,
|
||||
0x63, 0xe5, 0x22, 0x6d, 0xf9, 0x9f, 0x5d, 0x93, 0x1a, 0x58, 0xa0, 0xd0, 0x63, 0x00, 0x97, 0xe9,
|
||||
0x0e, 0xd3, 0xf8, 0x3b, 0xab, 0x69, 0xa1, 0x45, 0xa5, 0x26, 0xdf, 0xb8, 0x16, 0xbc, 0x71, 0xad,
|
||||
0x17, 0x38, 0x01, 0xe7, 0x85, 0x34, 0xff, 0x46, 0xff, 0x83, 0x1c, 0xa1, 0x86, 0x04, 0x66, 0x2e,
|
||||
0x04, 0x66, 0x09, 0x35, 0x04, 0xec, 0x25, 0x80, 0xce, 0x98, 0x63, 0xf6, 0x3d, 0x46, 0x5c, 0x35,
|
||||
0x3b, 0xdd, 0x1b, 0x6f, 0x86, 0x08, 0x1c, 0x43, 0xa3, 0x1d, 0x28, 0xb8, 0x4c, 0x1f, 0x1c, 0x69,
|
||||
0x42, 0x5a, 0xcd, 0x09, 0xb2, 0xdb, 0xe7, 0x91, 0x71, 0x69, 0xe1, 0x30, 0x0c, 0x6e, 0xb8, 0x46,
|
||||
0xbb, 0x50, 0xe0, 0x66, 0x68, 0xe4, 0x98, 0x50, 0xe6, 0xaa, 0xf9, 0x29, 0x1d, 0x6f, 0x8e, 0x49,
|
||||
0x43, 0x20, 0x30, 0xb0, 0x70, 0x8d, 0x9e, 0x42, 0x7a, 0x64, 0xd2, 0x23, 0x57, 0x85, 0x8b, 0xd5,
|
||||
0xe1, 0x34, 0x7b, 0x5c, 0x18, 0x4b, 0x0c, 0x7a, 0x0c, 0x19, 0x1e, 0x3e, 0x9e, 0xab, 0x16, 0x04,
|
||||
0xfa, 0x9f, 0xe7, 0x1b, 0xc3, 0x3c, 0x17, 0xfb, 0x00, 0x54, 0x87, 0x5c, 0x90, 0x4c, 0x6a, 0x59,
|
||||
0x80, 0xff, 0x75, 0x16, 0x1c, 0xa6, 0xdb, 0xf1, 0x7a, 0x0d, 0xfb, 0x6b, 0x1c, 0xe2, 0xd0, 0x27,
|
||||
0xf0, 0x37, 0x57, 0x1f, 0x13, 0xcd, 0x76, 0xac, 0x01, 0x71, 0x5d, 0x4d, 0x77, 0xb5, 0x58, 0x10,
|
||||
0xab, 0xc5, 0xf7, 0xb8, 0xb9, 0x6e, 0x59, 0xa3, 0x7d, 0x7d, 0xe4, 0x11, 0x7c, 0x8d, 0xc3, 0x3b,
|
||||
0x12, 0xbd, 0xe9, 0x76, 0xc2, 0x50, 0x47, 0x3b, 0x50, 0x1e, 0x1c, 0x9a, 0x23, 0x43, 0x66, 0xc3,
|
||||
0xc0, 0xf2, 0x28, 0x53, 0xe7, 0x05, 0xdd, 0xf5, 0x33, 0x74, 0x6f, 0x5a, 0x94, 0xdd, 0xdf, 0x90,
|
||||
0x84, 0x25, 0x81, 0xe2, 0x14, 0x5b, 0x1c, 0x53, 0xf9, 0x56, 0x01, 0x88, 0x32, 0x0e, 0xbd, 0x84,
|
||||
0x2c, 0xa1, 0xcc, 0x31, 0x89, 0xab, 0x2a, 0xcb, 0xc9, 0x95, 0xc2, 0xc6, 0xbd, 0xe9, 0xd3, 0xb5,
|
||||
0xd6, 0xa0, 0xcc, 0x39, 0xc1, 0x01, 0x41, 0x65, 0x0d, 0xd2, 0x62, 0x07, 0x95, 0x21, 0x79, 0x44,
|
||||
0x4e, 0x44, 0xd5, 0xc8, 0x63, 0xbe, 0x44, 0x8b, 0x90, 0x3e, 0xe6, 0xea, 0x88, 0x7a, 0x91, 0xc7,
|
||||
0xf2, 0xa3, 0xf2, 0x43, 0x02, 0x20, 0x8a, 0x4c, 0xa4, 0xc3, 0x7c, 0x18, 0x9b, 0xda, 0x58, 0xb7,
|
||||
0x7d, 0x8d, 0x9e, 0x4d, 0x1f, 0xdc, 0xd1, 0xf2, 0x95, 0x6e, 0x4b, 0xed, 0x8a, 0x7a, 0x6c, 0x0b,
|
||||
0x3d, 0x02, 0xd5, 0x70, 0x2c, 0xdb, 0x26, 0x86, 0x16, 0xa5, 0x81, 0xff, 0x9a, 0x5c, 0xb5, 0x34,
|
||||
0xbe, 0xea, 0x9f, 0x47, 0xa4, 0xf2, 0xdd, 0xbe, 0x84, 0x85, 0x33, 0xe4, 0xef, 0x30, 0xf4, 0x45,
|
||||
0xdc, 0xd0, 0xc2, 0xc6, 0x9d, 0x73, 0x74, 0x0f, 0xe9, 0xa4, 0xa3, 0x24, 0xee, 0x49, 0xe2, 0x91,
|
||||
0x52, 0xf9, 0x29, 0x0d, 0xf9, 0x30, 0x39, 0x50, 0x0d, 0x52, 0xa2, 0x46, 0x28, 0x17, 0xd6, 0x08,
|
||||
0x21, 0x87, 0xf6, 0x01, 0x74, 0x4a, 0x2d, 0xa6, 0x33, 0xd3, 0xa2, 0xbe, 0x1e, 0x0f, 0xa6, 0xce,
|
||||
0xc5, 0xda, 0x66, 0x88, 0x6d, 0xce, 0xe1, 0x18, 0x13, 0xfa, 0x02, 0xe6, 0xc7, 0xc4, 0x75, 0xf5,
|
||||
0xa1, 0x9f, 0xe7, 0xa2, 0x1e, 0x17, 0x36, 0x1e, 0x4e, 0x4f, 0xfd, 0x4a, 0xc2, 0xc5, 0x47, 0x73,
|
||||
0x0e, 0x17, 0xc7, 0xb1, 0xef, 0xca, 0xcf, 0x0a, 0x40, 0x74, 0x37, 0x6a, 0x43, 0xc1, 0x20, 0xee,
|
||||
0xc0, 0x31, 0x6d, 0x61, 0x86, 0x32, 0x43, 0x7d, 0x8f, 0x13, 0x9c, 0x2a, 0x9b, 0x89, 0x0f, 0x29,
|
||||
0x9b, 0x95, 0x3f, 0x14, 0x28, 0xc6, 0x6d, 0x41, 0x1f, 0x43, 0x8a, 0x9d, 0xd8, 0xd2, 0x45, 0xa5,
|
||||
0x8d, 0xa7, 0xb3, 0xbd, 0x48, 0xad, 0x77, 0x62, 0x13, 0x2c, 0x88, 0x50, 0x09, 0x12, 0x7e, 0x73,
|
||||
0x4d, 0xe1, 0x84, 0x69, 0xa0, 0xbb, 0xb0, 0xe0, 0xd1, 0x81, 0x35, 0xb6, 0x1d, 0xe2, 0xba, 0xc4,
|
||||
0xd0, 0x5c, 0xf3, 0x2d, 0x11, 0xef, 0x9f, 0xc2, 0xe5, 0xf8, 0x41, 0xd7, 0x7c, 0x4b, 0xd0, 0xbf,
|
||||
0xe1, 0xd2, 0x69, 0xd1, 0x94, 0x10, 0x2d, 0x4d, 0x0a, 0x56, 0x1f, 0x40, 0x8a, 0xdf, 0x89, 0x16,
|
||||
0xa1, 0xdc, 0xfb, 0xb4, 0xd3, 0xd0, 0xde, 0xb4, 0xbb, 0x9d, 0xc6, 0x56, 0x6b, 0xa7, 0xd5, 0xd8,
|
||||
0x2e, 0xcf, 0xa1, 0x1c, 0xa4, 0xba, 0x8d, 0x76, 0xaf, 0xac, 0xa0, 0x22, 0xe4, 0x70, 0x63, 0xab,
|
||||
0xd1, 0xda, 0x6f, 0x6c, 0x97, 0x13, 0xf5, 0xac, 0x1f, 0xe2, 0x95, 0xdf, 0x78, 0x29, 0x89, 0xea,
|
||||
0x76, 0x13, 0x20, 0x6a, 0x02, 0x7e, 0xee, 0xde, 0x99, 0xfa, 0x29, 0x70, 0x3e, 0x6c, 0x01, 0xe8,
|
||||
0x09, 0x2c, 0x85, 0x59, 0x1a, 0x46, 0xc4, 0x64, 0x9a, 0x5e, 0x0b, 0xd2, 0x34, 0x3a, 0x17, 0x79,
|
||||
	// ... gzipped FileDescriptorProto bytes ...
	0x45, 0x03, 0x11, 0x00, 0x00,
}
204
vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
generated
vendored
|
@ -18,9 +18,42 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// Global configuration of the trace service.
|
||||
// How spans should be sampled:
|
||||
// - Always off
|
||||
// - Always on
|
||||
// - Always follow the parent Span's decision (off if no parent).
|
||||
type ConstantSampler_ConstantDecision int32
|
||||
|
||||
const (
|
||||
ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0
|
||||
ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1
|
||||
ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2
|
||||
)
|
||||
|
||||
var ConstantSampler_ConstantDecision_name = map[int32]string{
|
||||
0: "ALWAYS_OFF",
|
||||
1: "ALWAYS_ON",
|
||||
2: "ALWAYS_PARENT",
|
||||
}
|
||||
|
||||
var ConstantSampler_ConstantDecision_value = map[string]int32{
|
||||
"ALWAYS_OFF": 0,
|
||||
"ALWAYS_ON": 1,
|
||||
"ALWAYS_PARENT": 2,
|
||||
}
|
||||
|
||||
func (x ConstantSampler_ConstantDecision) String() string {
|
||||
return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x))
|
||||
}
|
||||
|
||||
func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5359209b41ff50c5, []int{2, 0}
|
||||
}
|
||||
|
||||
// Global configuration of the trace service. All fields must be specified, or
|
||||
// the default (zero) values will be used for each type.
|
||||
type TraceConfig struct {
|
||||
// The global default sampler used to make decisions on span sampling.
|
||||
//
|
||||
|
@ -145,99 +178,15 @@ func (m *TraceConfig) GetMaxNumberOfLinks() int64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*TraceConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _TraceConfig_OneofMarshaler, _TraceConfig_OneofUnmarshaler, _TraceConfig_OneofSizer, []interface{}{
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*TraceConfig) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*TraceConfig_ProbabilitySampler)(nil),
|
||||
(*TraceConfig_ConstantSampler)(nil),
|
||||
(*TraceConfig_RateLimitingSampler)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _TraceConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*TraceConfig)
|
||||
// sampler
|
||||
switch x := m.Sampler.(type) {
|
||||
case *TraceConfig_ProbabilitySampler:
|
||||
b.EncodeVarint(1<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.ProbabilitySampler); err != nil {
|
||||
return err
|
||||
}
|
||||
case *TraceConfig_ConstantSampler:
|
||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.ConstantSampler); err != nil {
|
||||
return err
|
||||
}
|
||||
case *TraceConfig_RateLimitingSampler:
|
||||
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.RateLimitingSampler); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("TraceConfig.Sampler has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _TraceConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*TraceConfig)
|
||||
switch tag {
|
||||
case 1: // sampler.probability_sampler
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(ProbabilitySampler)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Sampler = &TraceConfig_ProbabilitySampler{msg}
|
||||
return true, err
|
||||
case 2: // sampler.constant_sampler
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(ConstantSampler)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Sampler = &TraceConfig_ConstantSampler{msg}
|
||||
return true, err
|
||||
case 3: // sampler.rate_limiting_sampler
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(RateLimitingSampler)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Sampler = &TraceConfig_RateLimitingSampler{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _TraceConfig_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*TraceConfig)
|
||||
// sampler
|
||||
switch x := m.Sampler.(type) {
|
||||
case *TraceConfig_ProbabilitySampler:
|
||||
s := proto.Size(x.ProbabilitySampler)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *TraceConfig_ConstantSampler:
|
||||
s := proto.Size(x.ConstantSampler)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *TraceConfig_RateLimitingSampler:
|
||||
s := proto.Size(x.RateLimitingSampler)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Sampler that tries to uniformly sample traces with a given probability.
|
||||
// The probability of sampling a trace is equal to that of the specified probability.
|
||||
type ProbabilitySampler struct {
|
||||
|
@ -280,14 +229,12 @@ func (m *ProbabilitySampler) GetSamplingProbability() float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
// Sampler that makes a constant decision (either always "yes" or always "no")
|
||||
// on span sampling.
|
||||
// Sampler that always makes a constant decision on span sampling.
|
||||
type ConstantSampler struct {
|
||||
// Whether spans should be always sampled, or never sampled.
|
||||
Decision bool `protobuf:"varint,1,opt,name=decision,proto3" json:"decision,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ConstantSampler) Reset() { *m = ConstantSampler{} }
|
||||
|
@ -315,11 +262,11 @@ func (m *ConstantSampler) XXX_DiscardUnknown() {
|
|||
|
||||
var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo
|
||||
|
||||
func (m *ConstantSampler) GetDecision() bool {
|
||||
func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision {
|
||||
if m != nil {
|
||||
return m.Decision
|
||||
}
|
||||
return false
|
||||
return ConstantSampler_ALWAYS_OFF
|
||||
}
|
||||
|
||||
// Sampler that tries to sample with a rate per time window.
|
||||
|
@ -364,6 +311,7 @@ func (m *RateLimitingSampler) GetQps() int64 {
|
|||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value)
|
||||
proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig")
|
||||
proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler")
|
||||
proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler")
|
||||
|
@ -375,32 +323,36 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_5359209b41ff50c5 = []byte{
	// 431 bytes of a gzipped FileDescriptorProto
	// ... previous descriptor bytes ...
	// 486 bytes of a gzipped FileDescriptorProto
	// ... regenerated descriptor bytes ...
	0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00,
}
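The hunk above replaces the boolean ConstantSampler.Decision with the new ConstantSampler_ConstantDecision enum (ALWAYS_OFF / ALWAYS_ON / ALWAYS_PARENT). A minimal sketch of building a TraceConfig against the regenerated types follows; the import path mirrors the vendor directory, and the tracepb alias and chosen values are illustrative assumptions, not part of this diff.

```go
package main

import (
	"fmt"

	// Import path assumed from the vendor directory layout.
	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
)

func main() {
	// Pick the constant sampler and always sample; Decision is an enum,
	// not a bool, after this dependency update.
	cfg := &tracepb.TraceConfig{
		Sampler: &tracepb.TraceConfig_ConstantSampler{
			ConstantSampler: &tracepb.ConstantSampler{
				Decision: tracepb.ConstantSampler_ALWAYS_ON,
			},
		},
		MaxNumberOfLinks: 32, // illustrative limit
	}
	fmt.Println(cfg.GetConstantSampler().GetDecision()) // prints: ALWAYS_ON
}
```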
70
vendor/github.com/emicklei/go-restful/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,70 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
restful.html
|
||||
|
||||
*.out
|
||||
|
||||
tmp.prof
|
||||
|
||||
go-restful.test
|
||||
|
||||
examples/restful-basic-authentication
|
||||
|
||||
examples/restful-encoding-filter
|
||||
|
||||
examples/restful-filters
|
||||
|
||||
examples/restful-hello-world
|
||||
|
||||
examples/restful-resource-functions
|
||||
|
||||
examples/restful-serve-static
|
||||
|
||||
examples/restful-user-service
|
||||
|
||||
*.DS_Store
|
||||
examples/restful-user-resource
|
||||
|
||||
examples/restful-multi-containers
|
||||
|
||||
examples/restful-form-handling
|
||||
|
||||
examples/restful-CORS-filter
|
||||
|
||||
examples/restful-options-filter
|
||||
|
||||
examples/restful-curly-router
|
||||
|
||||
examples/restful-cpuprofiler-service
|
||||
|
||||
examples/restful-pre-post-filters
|
||||
|
||||
curly.prof
|
||||
|
||||
examples/restful-NCSA-logging
|
||||
|
||||
examples/restful-html-template
|
||||
|
||||
s.html
|
||||
restful-path-tail
|
6
vendor/github.com/emicklei/go-restful/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,6 @@
language: go

go:
- 1.x

script: go test -v
261
vendor/github.com/emicklei/go-restful/CHANGES.md
generated
vendored
Normal file
|
@ -0,0 +1,261 @@
|
|||
## Change history of go-restful
|
||||
|
||||
|
||||
v2.9.2
|
||||
|
||||
- Reduce allocations in per-request methods to improve performance (#395)
|
||||
|
||||
v2.9.1
|
||||
|
||||
- Fix issue with default responses and invalid status code 0. (#393)
|
||||
|
||||
v2.9.0
|
||||
|
||||
- add per Route content encoding setting (overrides container setting)
|
||||
|
||||
v2.8.0
|
||||
|
||||
- add Request.QueryParameters()
|
||||
- add json-iterator (via build tag)
|
||||
- disable vgo module (until log is moved)
|
||||
|
||||
v2.7.1
|
||||
|
||||
- add vgo module
|
||||
|
||||
v2.6.1
|
||||
|
||||
- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
|
||||
|
||||
v2.6.0
|
||||
|
||||
- Make JSR 311 routing and path param processing consistent
|
||||
- Adding description to RouteBuilder.Reads()
|
||||
- Update example for Swagger12 and OpenAPI
|
||||
|
||||
2017-09-13
|
||||
|
||||
- added route condition functions using `.If(func)` in route building.
|
||||
|
||||
2017-02-16
|
||||
|
||||
- solved issue #304, make operation names unique
|
||||
|
||||
2017-01-30
|
||||
|
||||
[IMPORTANT] For swagger users, change your import statement to:
|
||||
swagger "github.com/emicklei/go-restful-swagger12"
|
||||
|
||||
- moved swagger 1.2 code to go-restful-swagger12
|
||||
- created TAG 2.0.0
|
||||
|
||||
2017-01-27
|
||||
|
||||
- remove defer request body close
|
||||
- expose Dispatch for testing filters and Routefunctions
|
||||
- swagger response model cannot be array
|
||||
- created TAG 1.0.0
|
||||
|
||||
2016-12-22
|
||||
|
||||
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
|
||||
|
||||
2016-11-26
|
||||
|
||||
- Default change! now use CurlyRouter (was RouterJSR311)
|
||||
- Default change! no more caching of request content
|
||||
- Default change! do not recover from panics
|
||||
|
||||
2016-09-22
|
||||
|
||||
- fix the DefaultRequestContentType feature
|
||||
|
||||
2016-02-14
|
||||
|
||||
- take the quality factor of the Accept header media type into account when deciding the content type of the response
|
||||
- add constructors for custom entity accessors for xml and json
|
||||
|
||||
2015-09-27
|
||||
|
||||
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
|
||||
|
||||
2015-09-25
|
||||
|
||||
- fixed problem with changing Header after WriteHeader (issue 235)
|
||||
|
||||
2015-09-14
|
||||
|
||||
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
|
||||
- added support for custom EntityReaderWriters.
|
||||
|
||||
2015-08-06
|
||||
|
||||
- add support for reading entities from compressed request content
|
||||
- use sync.Pool for compressors of http response and request body
|
||||
- add Description to Parameter for documentation in Swagger UI
|
||||
|
||||
2015-03-20
|
||||
|
||||
- add configurable logging
|
||||
|
||||
2015-03-18
|
||||
|
||||
- if not specified, the Operation is derived from the Route function
|
||||
|
||||
2015-03-17
|
||||
|
||||
- expose Parameter creation functions
|
||||
- make trace logger an interface
|
||||
- fix OPTIONSFilter
|
||||
- customize rendering of ServiceError
|
||||
- JSR311 router now handles wildcards
|
||||
- add Notes to Route
|
||||
|
||||
2014-11-27
|
||||
|
||||
- (api add) PrettyPrint per response. (as proposed in #167)
|
||||
|
||||
2014-11-12
|
||||
|
||||
- (api add) ApiVersion(.) for documentation in Swagger UI
|
||||
|
||||
2014-11-10
|
||||
|
||||
- (api change) struct fields tagged with "description" show up in Swagger UI
|
||||
|
||||
2014-10-31
|
||||
|
||||
- (api change) ReturnsError -> Returns
|
||||
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
|
||||
- fix swagger nested structs
|
||||
- sort Swagger response messages by code
|
||||
|
||||
2014-10-23
|
||||
|
||||
- (api add) ReturnsError allows you to document Http codes in swagger
|
||||
- fixed problem with greedy CurlyRouter
|
||||
- (api add) Access-Control-Max-Age in CORS
|
||||
- add tracing functionality (injectable) for debugging purposes
|
||||
- support JSON parse 64bit int
|
||||
- fix empty parameters for swagger
|
||||
- WebServicesUrl is now optional for swagger
|
||||
- fixed duplicate AccessControlAllowOrigin in CORS
|
||||
- (api change) expose ServeMux in container
|
||||
- (api add) added AllowedDomains in CORS
|
||||
- (api add) ParameterNamed for detailed documentation
|
||||
|
||||
2014-04-16
|
||||
|
||||
- (api add) expose constructor of Request for testing.
|
||||
|
||||
2014-06-27
|
||||
|
||||
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
|
||||
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
|
||||
|
||||
2014-07-03
|
||||
|
||||
- (api add) CORS can be configured with a list of allowed domains
|
||||
|
||||
2014-03-12
|
||||
|
||||
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
|
||||
|
||||
2014-02-26
|
||||
|
||||
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
|
||||
|
||||
2014-02-17
|
||||
|
||||
- (api change) renamed parameter constants (go-lint checks)
|
||||
|
||||
2014-01-10
|
||||
|
||||
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
|
||||
|
||||
2014-01-07
|
||||
|
||||
- (api change) Write* methods in Response now return the error or nil.
|
||||
- added example of serving HTML from a Go template.
|
||||
- fixed comparing Allowed headers in CORS (is now case-insensitive)
|
||||
|
||||
2013-11-13
|
||||
|
||||
- (api add) Response knows how many bytes are written to the response body.
|
||||
|
||||
2013-10-29
|
||||
|
||||
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
|
||||
|
||||
2013-10-04
|
||||
|
||||
- (api add) Response knows what HTTP status has been written
|
||||
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
|
||||
|
||||
2013-09-12
|
||||
|
||||
- (api change) Router interface simplified
|
||||
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
|
||||
|
||||
2013-08-05
|
||||
- add OPTIONS support
|
||||
- add CORS support
|
||||
|
||||
2013-08-27
|
||||
|
||||
- fixed some reported issues (see github)
|
||||
- (api change) deprecated use of WriteError; use WriteErrorString instead
|
||||
|
||||
2014-04-15
|
||||
|
||||
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
|
||||
|
||||
2013-08-08
|
||||
|
||||
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
|
||||
- (api add) the swagger package has be extended to have a UI per container.
|
||||
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
|
||||
- (api add) WriteErrorString to Response
|
||||
|
||||
Important API changes:
|
||||
|
||||
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
|
||||
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
|
||||
|
||||
|
||||
2013-07-06
|
||||
|
||||
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
|
||||
|
||||
2013-06-19
|
||||
|
||||
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
|
||||
|
||||
2013-06-03
|
||||
|
||||
- (api change) removed Dispatcher interface, hide PathExpression
|
||||
- changed receiver names of type functions to be more idiomatic Go
|
||||
|
||||
2013-06-02
|
||||
|
||||
- (optimize) Cache the RegExp compilation of Paths.
|
||||
|
||||
2013-05-22
|
||||
|
||||
- (api add) Added support for request/response filter functions
|
||||
|
||||
2013-05-18
|
||||
|
||||
|
||||
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
|
||||
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
|
||||
|
||||
[2012-11-14 .. 2013-05-18>
|
||||
|
||||
- See https://github.com/emicklei/go-restful/commits
|
||||
|
||||
2012-11-14
|
||||
|
||||
- Initial commit
|
||||
|
||||
|
22
vendor/github.com/emicklei/go-restful/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2012,2013 Ernest Micklei
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
7
vendor/github.com/emicklei/go-restful/Makefile
generated
vendored
Normal file
@ -0,0 +1,7 @@
all: test

test:
	go test -v .

ex:
	cd examples && ls *.go | xargs go build -o /tmp/ignore
88
vendor/github.com/emicklei/go-restful/README.md
generated
vendored
Normal file
|
@ -0,0 +1,88 @@
|
|||
go-restful
|
||||
==========
|
||||
package for building REST-style Web Services using Google Go
|
||||
|
||||
[](https://travis-ci.org/emicklei/go-restful)
|
||||
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
|
||||
[](https://godoc.org/github.com/emicklei/go-restful)
|
||||
|
||||
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
|
||||
|
||||
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
|
||||
|
||||
- GET = Retrieve a representation of a resource
|
||||
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
|
||||
- PUT = Create if you are sending the full content of the specified resource (URI).
|
||||
- PUT = Update if you are updating the full content of the specified resource.
|
||||
- DELETE = Delete if you are requesting the server to delete the resource
|
||||
- PATCH = Update partial content of a resource
|
||||
- OPTIONS = Get information about the communication options for the request URI
|
||||
|
||||
### Example
|
||||
|
||||
```Go
|
||||
ws := new(restful.WebService)
|
||||
ws.
|
||||
Path("/users").
|
||||
Consumes(restful.MIME_XML, restful.MIME_JSON).
|
||||
Produces(restful.MIME_JSON, restful.MIME_XML)
|
||||
|
||||
ws.Route(ws.GET("/{user-id}").To(u.findUser).
|
||||
Doc("get a user").
|
||||
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
|
||||
Writes(User{}))
|
||||
...
|
||||
|
||||
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
|
||||
id := request.PathParameter("user-id")
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
|
||||
|
||||
### Features
|
||||
|
||||
- Routes for request → function mapping with path parameter (e.g. {id}) support
|
||||
- Configurable router:
|
||||
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
|
||||
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
|
||||
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
|
||||
- Response API for writing structs to JSON/XML and setting headers
|
||||
- Customizable encoding using EntityReaderWriter registration
|
||||
- Filters for intercepting the request → response flow on Service or Route level
|
||||
- Request-scoped variables using attributes
|
||||
- Containers for WebServices on different HTTP endpoints
|
||||
- Content encoding (gzip,deflate) of request and response payloads
|
||||
- Automatic responses on OPTIONS (using a filter)
|
||||
- Automatic CORS request handling (using a filter)
|
||||
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
|
||||
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
|
||||
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
|
||||
- Configurable (trace) logging
|
||||
- Customizable gzip/deflate readers and writers using CompressorProvider registration
|
||||
|
||||
## How to customize
|
||||
There are several hooks to customize the behavior of the go-restful package.
|
||||
|
||||
- Router algorithm
|
||||
- Panic recovery
|
||||
- JSON decoder
|
||||
- Trace logging
|
||||
- Compression
|
||||
- Encoders for other serializers
|
||||
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a tag, e.g. `go build -tags=jsoniter .`
|
||||
|
||||
TODO: write examples of these.
|
||||
|
||||
## Resources
|
||||
|
||||
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
|
||||
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
|
||||
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
|
||||
- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
|
||||
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
|
||||
|
||||
Type ```git shortlog -s``` for a full list of contributors.
|
||||
|
||||
© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome.
|
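The README example above ends at the route definition. Below is a minimal, self-contained sketch of serving that WebService through the Container API from the vendored container.go; the import path follows the vendor directory, and the handler body and port are illustrative assumptions.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

type UserResource struct{}

// findUser echoes the path parameter back as JSON (illustrative only).
func (u UserResource) findUser(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("user-id")
	resp.WriteEntity(map[string]string{"id": id})
}

func main() {
	u := UserResource{}

	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_XML, restful.MIME_JSON).
		Produces(restful.MIME_JSON, restful.MIME_XML)
	ws.Route(ws.GET("/{user-id}").To(u.findUser))

	// Register the service on a fresh Container and serve its ServeMux.
	c := restful.NewContainer()
	c.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", c.ServeMux))
}
```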
1
vendor/github.com/emicklei/go-restful/Srcfile
generated
vendored
Normal file
@ -0,0 +1 @@
{"SkipDirs": ["examples"]}
10
vendor/github.com/emicklei/go-restful/bench_test.sh
generated
vendored
Normal file
@ -0,0 +1,10 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out

go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly

#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof
123
vendor/github.com/emicklei/go-restful/compress.go
generated
vendored
Normal file
|
@ -0,0 +1,123 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
|
||||
var EnableContentEncoding = false
|
||||
|
||||
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
|
||||
type CompressingResponseWriter struct {
|
||||
writer http.ResponseWriter
|
||||
compressor io.WriteCloser
|
||||
encoding string
|
||||
}
|
||||
|
||||
// Header is part of http.ResponseWriter interface
|
||||
func (c *CompressingResponseWriter) Header() http.Header {
|
||||
return c.writer.Header()
|
||||
}
|
||||
|
||||
// WriteHeader is part of http.ResponseWriter interface
|
||||
func (c *CompressingResponseWriter) WriteHeader(status int) {
|
||||
c.writer.WriteHeader(status)
|
||||
}
|
||||
|
||||
// Write is part of http.ResponseWriter interface
|
||||
// It is passed through the compressor
|
||||
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
|
||||
if c.isCompressorClosed() {
|
||||
return -1, errors.New("Compressing error: tried to write data using closed compressor")
|
||||
}
|
||||
return c.compressor.Write(bytes)
|
||||
}
|
||||
|
||||
// CloseNotify is part of http.CloseNotifier interface
|
||||
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
|
||||
return c.writer.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Close the underlying compressor
|
||||
func (c *CompressingResponseWriter) Close() error {
|
||||
if c.isCompressorClosed() {
|
||||
return errors.New("Compressing error: tried to close already closed compressor")
|
||||
}
|
||||
|
||||
c.compressor.Close()
|
||||
if ENCODING_GZIP == c.encoding {
|
||||
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
|
||||
}
|
||||
if ENCODING_DEFLATE == c.encoding {
|
||||
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
|
||||
}
|
||||
// gc hint needed?
|
||||
c.compressor = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CompressingResponseWriter) isCompressorClosed() bool {
|
||||
return nil == c.compressor
|
||||
}
|
||||
|
||||
// Hijack implements the Hijacker interface
|
||||
// This is especially useful when using Container.EnableContentEncoding
|
||||
// in combination with websockets (for instance gorilla/websocket)
|
||||
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
hijacker, ok := c.writer.(http.Hijacker)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
|
||||
}
|
||||
return hijacker.Hijack()
|
||||
}
|
||||
|
||||
// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
|
||||
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
|
||||
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
|
||||
gi := strings.Index(header, ENCODING_GZIP)
|
||||
zi := strings.Index(header, ENCODING_DEFLATE)
|
||||
// use in order of appearance
|
||||
if gi == -1 {
|
||||
return zi != -1, ENCODING_DEFLATE
|
||||
} else if zi == -1 {
|
||||
return gi != -1, ENCODING_GZIP
|
||||
} else {
|
||||
if gi < zi {
|
||||
return true, ENCODING_GZIP
|
||||
}
|
||||
return true, ENCODING_DEFLATE
|
||||
}
|
||||
}
|
||||
|
||||
// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate}
|
||||
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
|
||||
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
|
||||
c := new(CompressingResponseWriter)
|
||||
c.writer = httpWriter
|
||||
var err error
|
||||
if ENCODING_GZIP == encoding {
|
||||
w := currentCompressorProvider.AcquireGzipWriter()
|
||||
w.Reset(httpWriter)
|
||||
c.compressor = w
|
||||
c.encoding = ENCODING_GZIP
|
||||
} else if ENCODING_DEFLATE == encoding {
|
||||
w := currentCompressorProvider.AcquireZlibWriter()
|
||||
w.Reset(httpWriter)
|
||||
c.compressor = w
|
||||
c.encoding = ENCODING_DEFLATE
|
||||
} else {
|
||||
return nil, errors.New("Unknown encoding:" + encoding)
|
||||
}
|
||||
return c, err
|
||||
}
|
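Route dispatch wraps the response writer automatically when content encoding is enabled, but compress.go also exports the constructor for manual use. A minimal sketch, assuming the vendored import path, of wrapping a plain http.ResponseWriter by hand (the route and payload are illustrative):

```go
package main

import (
	"log"
	"net/http"
	"strings"

	restful "github.com/emicklei/go-restful"
)

func main() {
	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		// Only compress when the client advertises gzip support.
		if strings.Contains(r.Header.Get(restful.HEADER_AcceptEncoding), restful.ENCODING_GZIP) {
			cw, err := restful.NewCompressingResponseWriter(w, restful.ENCODING_GZIP)
			if err == nil {
				// Close returns the gzip.Writer to the compressor provider.
				defer cw.Close()
				w = cw
			}
		}
		w.Write([]byte("hello, compressed world\n"))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```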
103
vendor/github.com/emicklei/go-restful/compressor_cache.go
generated
vendored
Normal file
|
@ -0,0 +1,103 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
)
|
||||
|
||||
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
|
||||
// of writers and readers (resources).
|
||||
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
|
||||
type BoundedCachedCompressors struct {
|
||||
gzipWriters chan *gzip.Writer
|
||||
gzipReaders chan *gzip.Reader
|
||||
zlibWriters chan *zlib.Writer
|
||||
writersCapacity int
|
||||
readersCapacity int
|
||||
}
|
||||
|
||||
// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a filled cache.
|
||||
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
|
||||
b := &BoundedCachedCompressors{
|
||||
gzipWriters: make(chan *gzip.Writer, writersCapacity),
|
||||
gzipReaders: make(chan *gzip.Reader, readersCapacity),
|
||||
zlibWriters: make(chan *zlib.Writer, writersCapacity),
|
||||
writersCapacity: writersCapacity,
|
||||
readersCapacity: readersCapacity,
|
||||
}
|
||||
for ix := 0; ix < writersCapacity; ix++ {
|
||||
b.gzipWriters <- newGzipWriter()
|
||||
b.zlibWriters <- newZlibWriter()
|
||||
}
|
||||
for ix := 0; ix < readersCapacity; ix++ {
|
||||
b.gzipReaders <- newGzipReader()
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
|
||||
var writer *gzip.Writer
|
||||
select {
|
||||
case writer, _ = <-b.gzipWriters:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
writer = newGzipWriter()
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.gzipWriters) < b.writersCapacity {
|
||||
b.gzipWriters <- w
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
|
||||
var reader *gzip.Reader
|
||||
select {
|
||||
case reader, _ = <-b.gzipReaders:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
reader = newGzipReader()
|
||||
}
|
||||
return reader
|
||||
}
|
||||
|
||||
// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.gzipReaders) < b.readersCapacity {
|
||||
b.gzipReaders <- r
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
|
||||
var writer *zlib.Writer
|
||||
select {
|
||||
case writer, _ = <-b.zlibWriters:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
writer = newZlibWriter()
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.zlibWriters) < b.writersCapacity {
|
||||
b.zlibWriters <- w
|
||||
}
|
||||
}
|
91
vendor/github.com/emicklei/go-restful/compressor_pools.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
|
||||
type SyncPoolCompessors struct {
|
||||
GzipWriterPool *sync.Pool
|
||||
GzipReaderPool *sync.Pool
|
||||
ZlibWriterPool *sync.Pool
|
||||
}
|
||||
|
||||
// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
|
||||
func NewSyncPoolCompessors() *SyncPoolCompessors {
|
||||
return &SyncPoolCompessors{
|
||||
GzipWriterPool: &sync.Pool{
|
||||
New: func() interface{} { return newGzipWriter() },
|
||||
},
|
||||
GzipReaderPool: &sync.Pool{
|
||||
New: func() interface{} { return newGzipReader() },
|
||||
},
|
||||
ZlibWriterPool: &sync.Pool{
|
||||
New: func() interface{} { return newZlibWriter() },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
|
||||
return s.GzipWriterPool.Get().(*gzip.Writer)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
|
||||
s.GzipWriterPool.Put(w)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
|
||||
return s.GzipReaderPool.Get().(*gzip.Reader)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
|
||||
s.GzipReaderPool.Put(r)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
|
||||
return s.ZlibWriterPool.Get().(*zlib.Writer)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
|
||||
s.ZlibWriterPool.Put(w)
|
||||
}
|
||||
|
||||
func newGzipWriter() *gzip.Writer {
|
||||
// create with an empty bytes writer; it will be replaced before using the gzipWriter
|
||||
writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
func newGzipReader() *gzip.Reader {
|
||||
// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
|
||||
// we can safely use currentCompressProvider because it is set on package initialization.
|
||||
w := currentCompressorProvider.AcquireGzipWriter()
|
||||
defer currentCompressorProvider.ReleaseGzipWriter(w)
|
||||
b := new(bytes.Buffer)
|
||||
w.Reset(b)
|
||||
w.Flush()
|
||||
w.Close()
|
||||
reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return reader
|
||||
}
|
||||
|
||||
func newZlibWriter() *zlib.Writer {
|
||||
writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return writer
|
||||
}
|
54
vendor/github.com/emicklei/go-restful/compressors.go
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
)
|
||||
|
||||
// CompressorProvider describes a component that can provide compressors for the std methods.
|
||||
type CompressorProvider interface {
|
||||
// Returns a *gzip.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
AcquireGzipWriter() *gzip.Writer
|
||||
|
||||
// Releases an acquired *gzip.Writer.
|
||||
ReleaseGzipWriter(w *gzip.Writer)
|
||||
|
||||
// Returns a *gzip.Reader which needs to be released later.
|
||||
AcquireGzipReader() *gzip.Reader
|
||||
|
||||
// Releases an acquired *gzip.Reader.
|
||||
ReleaseGzipReader(w *gzip.Reader)
|
||||
|
||||
// Returns a *zlib.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
AcquireZlibWriter() *zlib.Writer
|
||||
|
||||
// Releases an acquired *zlib.Writer.
|
||||
ReleaseZlibWriter(w *zlib.Writer)
|
||||
}
|
||||
|
||||
// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip).
|
||||
var currentCompressorProvider CompressorProvider
|
||||
|
||||
func init() {
|
||||
currentCompressorProvider = NewSyncPoolCompessors()
|
||||
}
|
||||
|
||||
// CurrentCompressorProvider returns the current CompressorProvider.
|
||||
// It is initialized using a SyncPoolCompessors.
|
||||
func CurrentCompressorProvider() CompressorProvider {
|
||||
return currentCompressorProvider
|
||||
}
|
||||
|
||||
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
|
||||
func SetCompressorProvider(p CompressorProvider) {
|
||||
if p == nil {
|
||||
panic("cannot set compressor provider to nil")
|
||||
}
|
||||
currentCompressorProvider = p
|
||||
}
|
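compressors.go above installs a sync.Pool-backed provider at init and exposes SetCompressorProvider for swapping it. A short sketch, assuming the vendored import path, of switching to the bounded cache from compressor_cache.go and opting a Container into content encoding:

```go
package main

import (
	restful "github.com/emicklei/go-restful"
)

func main() {
	// Replace the default SyncPoolCompessors with a bounded cache of 20
	// gzip/zlib writers and 20 gzip readers; when the cache is exhausted,
	// extra acquisitions fall back to unmanaged, garbage-collected compressors.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))

	// Content encoding is off by default; opt in per container so responses
	// are gzip/deflate encoded when the client sends Accept-Encoding.
	c := restful.NewContainer()
	c.EnableContentEncoding(true)

	_ = c // register WebServices on c and serve as usual
}
```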
30
vendor/github.com/emicklei/go-restful/constants.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
const (
|
||||
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
|
||||
|
||||
HEADER_Allow = "Allow"
|
||||
HEADER_Accept = "Accept"
|
||||
HEADER_Origin = "Origin"
|
||||
HEADER_ContentType = "Content-Type"
|
||||
HEADER_LastModified = "Last-Modified"
|
||||
HEADER_AcceptEncoding = "Accept-Encoding"
|
||||
HEADER_ContentEncoding = "Content-Encoding"
|
||||
HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
|
||||
HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
|
||||
HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
|
||||
HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
|
||||
HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
|
||||
HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
|
||||
HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
|
||||
HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
|
||||
|
||||
ENCODING_GZIP = "gzip"
|
||||
ENCODING_DEFLATE = "deflate"
|
||||
)
|
377
vendor/github.com/emicklei/go-restful/container.go
generated
vendored
Normal file
|
@ -0,0 +1,377 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
|
||||
// The requests are further dispatched to routes of WebServices using a RouteSelector
|
||||
type Container struct {
|
||||
webServicesLock sync.RWMutex
|
||||
webServices []*WebService
|
||||
ServeMux *http.ServeMux
|
||||
isRegisteredOnRoot bool
|
||||
containerFilters []FilterFunction
|
||||
doNotRecover bool // default is true
|
||||
recoverHandleFunc RecoverHandleFunction
|
||||
serviceErrorHandleFunc ServiceErrorHandleFunction
|
||||
router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
|
||||
contentEncodingEnabled bool // default is false
|
||||
}
|
||||
|
||||
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
|
||||
func NewContainer() *Container {
|
||||
return &Container{
|
||||
webServices: []*WebService{},
|
||||
ServeMux: http.NewServeMux(),
|
||||
isRegisteredOnRoot: false,
|
||||
containerFilters: []FilterFunction{},
|
||||
doNotRecover: true,
|
||||
recoverHandleFunc: logStackOnRecover,
|
||||
serviceErrorHandleFunc: writeServiceError,
|
||||
router: CurlyRouter{},
|
||||
contentEncodingEnabled: false}
|
||||
}
|
||||
|
||||
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
|
||||
// The first argument is what recover() returns. The second must be used to communicate an error response.
|
||||
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
|
||||
|
||||
// RecoverHandler changes the default function (logStackOnRecover) to be called
|
||||
// when a panic is detected. DoNotRecover must have its default value (=false).
|
||||
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
|
||||
c.recoverHandleFunc = handler
|
||||
}
|
||||
|
||||
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
|
||||
// The first argument is the service error, the second is the request that resulted in the error and
|
||||
// the third must be used to communicate an error response.
|
||||
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
|
||||
|
||||
// ServiceErrorHandler changes the default function (writeServiceError) to be called
|
||||
// when a ServiceError is detected.
|
||||
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
|
||||
c.serviceErrorHandleFunc = handler
|
||||
}
|
||||
|
||||
// DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
// If set to true, Route functions are responsible for handling any error situation.
|
||||
// Default value is true.
|
||||
func (c *Container) DoNotRecover(doNot bool) {
|
||||
c.doNotRecover = doNot
|
||||
}
|
||||
|
||||
// Router changes the default Router (currently CurlyRouter)
|
||||
func (c *Container) Router(aRouter RouteSelector) {
|
||||
c.router = aRouter
|
||||
}
|
||||
|
||||
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
|
||||
func (c *Container) EnableContentEncoding(enabled bool) {
|
||||
c.contentEncodingEnabled = enabled
|
||||
}
|
||||
|
||||
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
|
||||
func (c *Container) Add(service *WebService) *Container {
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
|
||||
// if rootPath was not set then lazy initialize it
|
||||
if len(service.rootPath) == 0 {
|
||||
service.Path("/")
|
||||
}
|
||||
|
||||
// cannot have duplicate root paths
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
log.Printf("WebService with duplicate root path detected:['%v']", each)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// If not registered on root then add specific mapping
|
||||
if !c.isRegisteredOnRoot {
|
||||
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
|
||||
}
|
||||
c.webServices = append(c.webServices, service)
|
||||
return c
|
||||
}
|
||||
|
||||
// addHandler may set a new HandleFunc for the serveMux
|
||||
// this function must run inside the critical region protected by the webServicesLock.
|
||||
// returns true if the function was registered on root ("/")
|
||||
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
|
||||
pattern := fixedPrefixPath(service.RootPath())
|
||||
// check if root path registration is needed
|
||||
if "/" == pattern || "" == pattern {
|
||||
serveMux.HandleFunc("/", c.dispatch)
|
||||
return true
|
||||
}
|
||||
// detect if registration already exists
|
||||
alreadyMapped := false
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
alreadyMapped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !alreadyMapped {
|
||||
serveMux.HandleFunc(pattern, c.dispatch)
|
||||
if !strings.HasSuffix(pattern, "/") {
|
||||
serveMux.HandleFunc(pattern+"/", c.dispatch)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ws *WebService) error {
|
||||
if c.ServeMux == http.DefaultServeMux {
|
||||
errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
|
||||
log.Print(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
// build a new ServeMux and re-register all WebServices
|
||||
newServeMux := http.NewServeMux()
|
||||
newServices := []*WebService{}
|
||||
newIsRegisteredOnRoot := false
|
||||
for _, each := range c.webServices {
|
||||
if each.rootPath != ws.rootPath {
|
||||
// If not registered on root then add specific mapping
|
||||
if !newIsRegisteredOnRoot {
|
||||
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
|
||||
}
|
||||
newServices = append(newServices, each)
|
||||
}
|
||||
}
|
||||
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
|
||||
return nil
|
||||
}
|
||||
|
||||
// logStackOnRecover is the default RecoverHandleFunction and is called
|
||||
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
|
||||
// Default implementation logs the stacktrace and writes the stacktrace on the response.
|
||||
// This may be a security issue as it exposes sourcecode information.
|
||||
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))
|
||||
for i := 2; ; i += 1 {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
|
||||
}
|
||||
log.Print(buffer.String())
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
httpWriter.Write(buffer.Bytes())
|
||||
}
|
||||
|
||||
// writeServiceError is the default ServiceErrorHandleFunction and is called
|
||||
// when a ServiceError is returned during route selection. Default implementation
|
||||
// calls resp.WriteErrorString(err.Code, err.Message)
|
||||
func writeServiceError(err ServiceError, req *Request, resp *Response) {
|
||||
resp.WriteErrorString(err.Code, err.Message)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
if httpWriter == nil {
|
||||
panic("httpWriter cannot be nil")
|
||||
}
|
||||
if httpRequest == nil {
|
||||
panic("httpRequest cannot be nil")
|
||||
}
|
||||
c.dispatch(httpWriter, httpRequest)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
writer := httpWriter
|
||||
|
||||
// CompressingResponseWriter should be closed after all operations are done
|
||||
defer func() {
|
||||
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
|
||||
compressWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Install panic recovery unless told otherwise
|
||||
if !c.doNotRecover { // catch all for 500 response
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
c.recoverHandleFunc(r, writer)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Find the best matching Route; err is non-nil if no match was found
|
||||
var webService *WebService
|
||||
var route *Route
|
||||
var err error
|
||||
func() {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
webService, route, err = c.router.SelectRoute(
|
||||
c.webServices,
|
||||
httpRequest)
|
||||
}()
|
||||
|
||||
// Detect if compression is needed
|
||||
// start from the container-level setting, then check for a per-route override
|
||||
contentEncodingEnabled := c.contentEncodingEnabled
|
||||
if route != nil && route.contentEncodingEnabled != nil {
|
||||
contentEncodingEnabled = *route.contentEncodingEnabled
|
||||
}
|
||||
if contentEncodingEnabled {
|
||||
doCompress, encoding := wantsCompressedResponse(httpRequest)
|
||||
if doCompress {
|
||||
var err error
|
||||
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
log.Print("unable to install compressor: ", err)
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// a non-200 response has already been written
|
||||
// run container filters anyway; they should not touch the response...
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
switch err.(type) {
|
||||
case ServiceError:
|
||||
ser := err.(ServiceError)
|
||||
c.serviceErrorHandleFunc(ser, req, resp)
|
||||
}
|
||||
// TODO
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
|
||||
return
|
||||
}
|
||||
pathProcessor, routerProcessesPath := c.router.(PathProcessor)
|
||||
if !routerProcessesPath {
|
||||
pathProcessor = defaultPathProcessor{}
|
||||
}
|
||||
pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path)
|
||||
wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams)
|
||||
// pass through filters (if any)
|
||||
if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
|
||||
// compose filter chain
|
||||
allFilters := []FilterFunction{}
|
||||
allFilters = append(allFilters, c.containerFilters...)
|
||||
allFilters = append(allFilters, webService.filters...)
|
||||
allFilters = append(allFilters, route.Filters...)
|
||||
chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
|
||||
// handle request by route after passing all filters
|
||||
route.Function(wrappedRequest, wrappedResponse)
|
||||
}}
|
||||
chain.ProcessFilter(wrappedRequest, wrappedResponse)
|
||||
} else {
|
||||
// no filters, handle request by route
|
||||
route.Function(wrappedRequest, wrappedResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
|
||||
func fixedPrefixPath(pathspec string) string {
|
||||
varBegin := strings.Index(pathspec, "{")
|
||||
if -1 == varBegin {
|
||||
return pathspec
|
||||
}
|
||||
return pathspec[:varBegin]
|
||||
}
|
||||
|
||||
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
|
||||
func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
c.ServeMux.ServeHTTP(httpwriter, httpRequest)
|
||||
}
|
||||
|
||||
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
|
||||
func (c *Container) Handle(pattern string, handler http.Handler) {
|
||||
c.ServeMux.Handle(pattern, handler)
|
||||
}
|
||||
|
||||
// HandleWithFilter registers the handler for the given pattern.
|
||||
// Container's filter chain is applied for handler.
|
||||
// If a handler already exists for pattern, HandleWithFilter panics.
|
||||
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
|
||||
f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
|
||||
if len(c.containerFilters) == 0 {
|
||||
handler.ServeHTTP(httpResponse, httpRequest)
|
||||
return
|
||||
}
|
||||
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
handler.ServeHTTP(httpResponse, httpRequest)
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
|
||||
}
|
||||
|
||||
c.Handle(pattern, http.HandlerFunc(f))
|
||||
}
|
||||
|
||||
// Filter appends a container FilterFunction. These are called before dispatching
|
||||
// a http.Request to a WebService from the container
|
||||
func (c *Container) Filter(filter FilterFunction) {
|
||||
c.containerFilters = append(c.containerFilters, filter)
|
||||
}
|
||||
|
||||
// RegisteredWebServices returns the collection of added WebServices
|
||||
func (c *Container) RegisteredWebServices() []*WebService {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
result := make([]*WebService, len(c.webServices))
|
||||
for ix := range c.webServices {
|
||||
result[ix] = c.webServices[ix]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
|
||||
func (c *Container) computeAllowedMethods(req *Request) []string {
|
||||
// Go through all RegisteredWebServices() and all its Routes to collect the options
|
||||
methods := []string{}
|
||||
requestPath := req.Request.URL.Path
|
||||
for _, ws := range c.RegisteredWebServices() {
|
||||
matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
|
||||
if matches != nil {
|
||||
finalMatch := matches[len(matches)-1]
|
||||
for _, rt := range ws.Routes() {
|
||||
matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
|
||||
if matches != nil {
|
||||
lastMatch := matches[len(matches)-1]
|
||||
if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’.
|
||||
methods = append(methods, rt.Method)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// methods = append(methods, "OPTIONS") not sure about this
|
||||
return methods
|
||||
}
|
||||
|
||||
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
|
||||
// It is basic because no parameter or (produces) content-type information is given.
|
||||
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
|
||||
resp := NewResponse(httpWriter)
|
||||
resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
|
||||
return NewRequest(httpRequest), resp
|
||||
}
|
202
vendor/github.com/emicklei/go-restful/cors_filter.go
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
|
||||
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
|
||||
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
|
||||
//
|
||||
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
|
||||
// http://enable-cors.org/server.html
|
||||
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
|
||||
type CrossOriginResourceSharing struct {
|
||||
ExposeHeaders []string // list of Header names
|
||||
AllowedHeaders []string // list of Header names
|
||||
AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
|
||||
AllowedMethods []string
|
||||
MaxAge int // number of seconds before requiring new Options request
|
||||
CookiesAllowed bool
|
||||
Container *Container
|
||||
|
||||
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
|
||||
}
|
||||
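// Illustrative usage sketch (not part of the upstream file): the filter is typically
// attached to a Container. The container variable and the allowed values below are
// assumptions for the example only.
//
//	cors := CrossOriginResourceSharing{
//		AllowedDomains: []string{"example.org"},
//		AllowedHeaders: []string{"Content-Type", "Accept"},
//		CookiesAllowed: false,
//		Container:      myContainer, // hypothetical *Container
//	}
//	myContainer.Filter(cors.Filter)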
|
||||
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
|
||||
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
|
||||
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if len(origin) == 0 {
|
||||
if trace {
|
||||
traceLogger.Print("no Http header Origin set")
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
|
||||
if trace {
|
||||
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if req.Request.Method != "OPTIONS" {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
|
||||
c.doPreflightRequest(req, resp)
|
||||
} else {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
|
||||
c.setOptionsHeaders(req, resp)
|
||||
// continue processing the response
|
||||
}
|
||||
|
||||
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
|
||||
if len(c.AllowedMethods) == 0 {
|
||||
if c.Container == nil {
|
||||
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
|
||||
} else {
|
||||
c.AllowedMethods = c.Container.computeAllowedMethods(req)
|
||||
}
|
||||
}
|
||||
|
||||
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
|
||||
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestMethod,
|
||||
acrm,
|
||||
c.AllowedMethods)
|
||||
}
|
||||
return
|
||||
}
|
||||
acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
|
||||
if len(acrhs) > 0 {
|
||||
for _, each := range strings.Split(acrhs, ",") {
|
||||
if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestHeaders,
|
||||
acrhs,
|
||||
c.AllowedHeaders)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
|
||||
resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
|
||||
c.setOptionsHeaders(req, resp)
|
||||
|
||||
// return http 200 response, no body
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
|
||||
c.checkAndSetExposeHeaders(resp)
|
||||
c.setAllowOriginHeader(req, resp)
|
||||
c.checkAndSetAllowCredentials(resp)
|
||||
if c.MaxAge > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
|
||||
if len(origin) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(c.AllowedDomains) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
allowed := false
|
||||
for _, domain := range c.AllowedDomains {
|
||||
if domain == origin {
|
||||
allowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allowed {
|
||||
if len(c.allowedOriginPatterns) == 0 {
|
||||
// compile allowed domains to allowed origin patterns
|
||||
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
c.allowedOriginPatterns = allowedOriginRegexps
|
||||
}
|
||||
|
||||
for _, pattern := range c.allowedOriginPatterns {
|
||||
if allowed = pattern.MatchString(origin); allowed {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allowed
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if c.isOriginAllowed(origin) {
|
||||
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
|
||||
if len(c.ExposeHeaders) > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
|
||||
if c.CookiesAllowed {
|
||||
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
|
||||
for _, each := range allowedMethods {
|
||||
if each == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
|
||||
for _, each := range c.AllowedHeaders {
|
||||
if strings.ToLower(each) == strings.ToLower(header) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Take a list of strings and compile them into a list of regular expressions.
|
||||
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
|
||||
regexps := []*regexp.Regexp{}
|
||||
for _, regexpStr := range regexpStrings {
|
||||
r, err := regexp.Compile(regexpStr)
|
||||
if err != nil {
|
||||
return regexps, err
|
||||
}
|
||||
regexps = append(regexps, r)
|
||||
}
|
||||
return regexps, nil
|
||||
}
|
2
vendor/github.com/emicklei/go-restful/coverage.sh
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
go test -coverprofile=coverage.out
|
||||
go tool cover -html=coverage.out
|
164
vendor/github.com/emicklei/go-restful/curly.go
generated
vendored
Normal file
|
@ -0,0 +1,164 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
|
||||
type CurlyRouter struct{}
|
||||
|
||||
// SelectRoute is part of the Router interface and returns the best match
|
||||
// for the WebService and its Route for the given Request.
|
||||
func (c CurlyRouter) SelectRoute(
|
||||
webServices []*WebService,
|
||||
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
|
||||
|
||||
requestTokens := tokenizePath(httpRequest.URL.Path)
|
||||
|
||||
detectedService := c.detectWebService(requestTokens, webServices)
|
||||
if detectedService == nil {
|
||||
if trace {
|
||||
traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
|
||||
}
|
||||
return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
candidateRoutes := c.selectRoutes(detectedService, requestTokens)
|
||||
if len(candidateRoutes) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
|
||||
}
|
||||
return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
|
||||
if selectedRoute == nil {
|
||||
return detectedService, nil, err
|
||||
}
|
||||
return detectedService, selectedRoute, nil
|
||||
}
|
||||
|
||||
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
|
||||
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
|
||||
candidates := make(sortableCurlyRoutes, 0, 8)
|
||||
for _, each := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
|
||||
if matches {
|
||||
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
}
|
||||
}
|
||||
sort.Sort(candidates)
|
||||
return candidates
|
||||
}
|
||||
|
||||
// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and the number of static path elements.
|
||||
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
|
||||
if len(routeTokens) < len(requestTokens) {
|
||||
// proceed in matching only if last routeToken is wildcard
|
||||
count := len(routeTokens)
|
||||
if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
|
||||
return false, 0, 0
|
||||
}
|
||||
// proceed
|
||||
}
|
||||
for i, routeToken := range routeTokens {
|
||||
if i == len(requestTokens) {
|
||||
// reached end of request path
|
||||
return false, 0, 0
|
||||
}
|
||||
requestToken := requestTokens[i]
|
||||
if strings.HasPrefix(routeToken, "{") {
|
||||
paramCount++
|
||||
if colon := strings.Index(routeToken, ":"); colon != -1 {
|
||||
// match by regex
|
||||
matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
|
||||
if !matchesToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
if matchesRemainder {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else { // no { prefix
|
||||
if requestToken != routeToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
staticCount++
|
||||
}
|
||||
}
|
||||
return true, paramCount, staticCount
|
||||
}
|
||||
|
||||
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
|
||||
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
|
||||
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
|
||||
regPart := routeToken[colon+1 : len(routeToken)-1]
|
||||
if regPart == "*" {
|
||||
if trace {
|
||||
traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
|
||||
}
|
||||
return true, true
|
||||
}
|
||||
matched, err := regexp.MatchString(regPart, requestToken)
|
||||
return (matched && err == nil), false
|
||||
}
|
||||
|
||||
var jsr311Router = RouterJSR311{}
|
||||
|
||||
// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
|
||||
// headers of the Request. See also RouterJSR311 in jsr311.go
|
||||
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
|
||||
// tracing is done inside detectRoute
|
||||
return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
|
||||
}
|
||||
|
||||
// detectWebService returns the best matching webService given the list of path tokens.
|
||||
// see also computeWebserviceScore
|
||||
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
|
||||
var best *WebService
|
||||
score := -1
|
||||
for _, each := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
|
||||
if matches && (eachScore > score) {
|
||||
best = each
|
||||
score = eachScore
|
||||
}
|
||||
}
|
||||
return best
|
||||
}
|
||||
|
||||
// computeWebserviceScore returns whether tokens match and
|
||||
// the weighted score of the longest matching consecutive tokens from the beginning.
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
|
||||
if len(tokens) > len(requestTokens) {
|
||||
return false, 0
|
||||
}
|
||||
score := 0
|
||||
for i := 0; i < len(tokens); i++ {
|
||||
each := requestTokens[i]
|
||||
other := tokens[i]
|
||||
if len(each) == 0 && len(other) == 0 {
|
||||
score++
|
||||
continue
|
||||
}
|
||||
if len(other) > 0 && strings.HasPrefix(other, "{") {
|
||||
// no empty match
|
||||
if len(each) == 0 {
|
||||
return false, score
|
||||
}
|
||||
score += 1
|
||||
} else {
|
||||
// not a parameter
|
||||
if each != other {
|
||||
return false, score
|
||||
}
|
||||
score += (len(tokens) - i) * 10 //fuzzy
|
||||
}
|
||||
}
|
||||
return true, score
|
||||
}
|
54
vendor/github.com/emicklei/go-restful/curly_route.go
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
|
||||
type curlyRoute struct {
|
||||
route Route
|
||||
paramCount int
|
||||
staticCount int
|
||||
}
|
||||
|
||||
// sortableCurlyRoutes orders by most parameters and path elements first.
|
||||
type sortableCurlyRoutes []curlyRoute
|
||||
|
||||
func (s *sortableCurlyRoutes) add(route curlyRoute) {
|
||||
*s = append(*s, route)
|
||||
}
|
||||
|
||||
func (s sortableCurlyRoutes) routes() (routes []Route) {
|
||||
routes = make([]Route, 0, len(s))
|
||||
for _, each := range s {
|
||||
routes = append(routes, each.route) // TODO change return type
|
||||
}
|
||||
return routes
|
||||
}
|
||||
|
||||
func (s sortableCurlyRoutes) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s sortableCurlyRoutes) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
func (s sortableCurlyRoutes) Less(i, j int) bool {
|
||||
a := s[j]
|
||||
b := s[i]
|
||||
|
||||
// primary key
|
||||
if a.staticCount < b.staticCount {
|
||||
return true
|
||||
}
|
||||
if a.staticCount > b.staticCount {
|
||||
return false
|
||||
}
|
||||
// secondary key
|
||||
if a.paramCount < b.paramCount {
|
||||
return true
|
||||
}
|
||||
if a.paramCount > b.paramCount {
|
||||
return false
|
||||
}
|
||||
return a.route.Path < b.route.Path
|
||||
}
|
185
vendor/github.com/emicklei/go-restful/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,185 @@
|
|||
/*
|
||||
Package restful, a lean package for creating REST-style WebServices without magic.
|
||||
|
||||
WebServices and Routes
|
||||
|
||||
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
|
||||
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
|
||||
WebServices must be added to a container (see below) in order to handle Http requests from a server.
|
||||
|
||||
A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
|
||||
This package has the logic to find the best matching Route and if found, call its Function.
|
||||
|
||||
ws := new(restful.WebService)
|
||||
ws.
|
||||
Path("/users").
|
||||
Consumes(restful.MIME_JSON, restful.MIME_XML).
|
||||
Produces(restful.MIME_JSON, restful.MIME_XML)
|
||||
|
||||
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
|
||||
|
||||
...
|
||||
|
||||
// GET http://localhost:8080/users/1
|
||||
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
|
||||
id := request.PathParameter("user-id")
|
||||
...
|
||||
}
|
||||
|
||||
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
|
||||
|
||||
Regular expression matching Routes
|
||||
|
||||
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
|
||||
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
|
||||
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
|
||||
This feature requires the use of a CurlyRouter.
|
||||
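A short sketch of declaring such a route; the handler name findPerson is an assumption for illustration:

	ws := new(restful.WebService)
	ws.Path("/persons")
	ws.Route(ws.GET("/{name:[A-Z][A-Z]}").To(findPerson))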
|
||||
Containers
|
||||
|
||||
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
|
||||
Using the statements "restful.Add(...)" and "restful.Filter(...)" will register WebServices and Filters to the Default Container.
|
||||
The Default container of go-restful uses the http.DefaultServeMux.
|
||||
You can create your own Container and create a new http.Server for that particular container.
|
||||
|
||||
container := restful.NewContainer()
|
||||
server := &http.Server{Addr: ":8081", Handler: container}
|
||||
|
||||
Filters
|
||||
|
||||
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
|
||||
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
|
||||
In the restful package there are three hooks into the request,response flow where filters can be added.
|
||||
Each filter must define a FilterFunction:
|
||||
|
||||
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
|
||||
|
||||
Use the following statement to pass the request,response pair to the next filter or RouteFunction
|
||||
|
||||
chain.ProcessFilter(req, resp)
|
||||
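A minimal FilterFunction sketch (the body is illustrative; the name globalLogging matches the container filter example below):

	func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
		log.Printf("%s %s", req.Request.Method, req.Request.URL)
		chain.ProcessFilter(req, resp) // hand over to the next filter or the RouteFunction
	}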
|
||||
Container Filters
|
||||
|
||||
These are processed before any registered WebService.
|
||||
|
||||
// install a (global) filter for the default container (processed before any webservice)
|
||||
restful.Filter(globalLogging)
|
||||
|
||||
WebService Filters
|
||||
|
||||
These are processed before any Route of a WebService.
|
||||
|
||||
// install a webservice filter (processed before any route)
|
||||
ws.Filter(webserviceLogging).Filter(measureTime)
|
||||
|
||||
|
||||
Route Filters
|
||||
|
||||
These are processed before calling the function associated with the Route.
|
||||
|
||||
// install 2 chained route filters (processed before calling findUser)
|
||||
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
|
||||
|
||||
Response Encoding
|
||||
|
||||
Two encodings are supported: gzip and deflate. To enable this for all responses:
|
||||
|
||||
restful.DefaultContainer.EnableContentEncoding(true)
|
||||
|
||||
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
|
||||
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
|
||||
|
||||
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
|
||||
|
||||
OPTIONS support
|
||||
|
||||
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
|
||||
|
||||
Filter(OPTIONSFilter())
|
||||
|
||||
CORS
|
||||
|
||||
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
|
||||
|
||||
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
|
||||
Filter(cors.Filter)
|
||||
|
||||
Error Handling
|
||||
|
||||
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
|
||||
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
|
||||
|
||||
400: Bad Request
|
||||
|
||||
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
|
||||
|
||||
404: Not Found
|
||||
|
||||
Despite a valid URI, the resource requested may not be available.
|
||||
|
||||
500: Internal Server Error
|
||||
|
||||
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
|
||||
|
||||
405: Method Not Allowed
|
||||
|
||||
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
|
||||
|
||||
406: Not Acceptable
|
||||
|
||||
The request either has no Accept header or specifies one that is not supported by this operation.
|
||||
|
||||
415: Unsupported Media Type
|
||||
|
||||
The request either has no Content-Type header or specifies one that is not supported by this operation.
|
||||
|
||||
ServiceError
|
||||
|
||||
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
|
||||
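A hedged sketch of signalling an error from a RouteFunction using Response.WriteErrorString (the same call the default ServiceError handler uses); the validation condition is illustrative:

	func findUser(request *restful.Request, response *restful.Response) {
		id := request.PathParameter("user-id")
		if id == "" {
			response.WriteErrorString(http.StatusBadRequest, "user-id is required")
			return
		}
		...
	}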
|
||||
Performance options
|
||||
|
||||
This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
|
||||
|
||||
restful.DefaultContainer.DoNotRecover(false)
|
||||
|
||||
DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
If set to false, the container will recover from panics.
|
||||
Default value is true
|
||||
|
||||
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
|
||||
|
||||
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
|
||||
Because writers are expensive structures, performance improves further when using a preloaded cache. You can also inject your own implementation.
|
||||
|
||||
Troubleshooting
|
||||
|
||||
This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
|
||||
Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger), for example:
|
||||
|
||||
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
|
||||
|
||||
Logging
|
||||
|
||||
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
|
||||
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
|
||||
long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
|
||||
preferred package is simple.
|
||||
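A hedged sketch (destination and prefix are illustrative; log is the standard library package, which satisfies StdLogger):

	restful.SetLogger(log.New(os.Stderr, "[restful] ", log.LstdFlags))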
|
||||
Resources
|
||||
|
||||
[project]: https://github.com/emicklei/go-restful
|
||||
|
||||
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
|
||||
|
||||
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
|
||||
|
||||
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
|
||||
|
||||
(c) 2012-2015, http://ernestmicklei.com. MIT License
|
||||
*/
|
||||
package restful
|
162
vendor/github.com/emicklei/go-restful/entity_accessors.go
generated
vendored
Normal file
|
@ -0,0 +1,162 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// EntityReaderWriter can read and write values using an encoding such as JSON or XML.
|
||||
type EntityReaderWriter interface {
|
||||
// Read a serialized version of the value from the request.
|
||||
// The Request may have a decompressing reader. Depends on Content-Encoding.
|
||||
Read(req *Request, v interface{}) error
|
||||
|
||||
// Write a serialized version of the value on the response.
|
||||
// The Response may have a compressing writer. Depends on Accept-Encoding.
|
||||
// status should be a valid Http Status code
|
||||
Write(resp *Response, status int, v interface{}) error
|
||||
}
|
||||
|
||||
// entityAccessRegistry is a singleton
|
||||
var entityAccessRegistry = &entityReaderWriters{
|
||||
protection: new(sync.RWMutex),
|
||||
accessors: map[string]EntityReaderWriter{},
|
||||
}
|
||||
|
||||
// entityReaderWriters associates MIME to an EntityReaderWriter
|
||||
type entityReaderWriters struct {
|
||||
protection *sync.RWMutex
|
||||
accessors map[string]EntityReaderWriter
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
|
||||
RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
|
||||
}
|
||||
|
||||
// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type.
|
||||
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
|
||||
entityAccessRegistry.protection.Lock()
|
||||
defer entityAccessRegistry.protection.Unlock()
|
||||
entityAccessRegistry.accessors[mime] = erw
|
||||
}
|
||||
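// Usage sketch (added for illustration, not part of the upstream file): reuse the
// built-in JSON accessor for an additional, hypothetical vendor MIME type so that
// content with that type is encoded and decoded as JSON.
//
//	RegisterEntityAccessor("application/vnd.example.v1+json", NewEntityAccessorJSON("application/vnd.example.v1+json"))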
|
||||
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
|
||||
// This package is already initialized with such an accessor using the MIME_JSON contentType.
|
||||
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
|
||||
return entityJSONAccess{ContentType: contentType}
|
||||
}
|
||||
|
||||
// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
|
||||
// This package is already initialized with such an accessor using the MIME_XML contentType.
|
||||
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
|
||||
return entityXMLAccess{ContentType: contentType}
|
||||
}
|
||||
|
||||
// accessorAt returns the registered ReaderWriter for this MIME type.
|
||||
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
|
||||
r.protection.RLock()
|
||||
defer r.protection.RUnlock()
|
||||
er, ok := r.accessors[mime]
|
||||
if !ok {
|
||||
// retry with reverse lookup
|
||||
// more expensive but we are in an exceptional situation anyway
|
||||
for k, v := range r.accessors {
|
||||
if strings.Contains(mime, k) {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return er, ok
|
||||
}
|
||||
|
||||
// entityXMLAccess is a EntityReaderWriter for XML encoding
|
||||
type entityXMLAccess struct {
|
||||
// This is used for setting the Content-Type header when writing
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// Read unmarshals the value from XML
|
||||
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
|
||||
return xml.NewDecoder(req.Request.Body).Decode(v)
|
||||
}
|
||||
|
||||
// Write marshals the value to XML and sets the Content-Type Header.
|
||||
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
|
||||
return writeXML(resp, status, e.ContentType, v)
|
||||
}
|
||||
|
||||
// writeXML marshals the value to XML and sets the Content-Type Header.
|
||||
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
|
||||
if v == nil {
|
||||
resp.WriteHeader(status)
|
||||
// do not write a nil representation
|
||||
return nil
|
||||
}
|
||||
if resp.prettyPrint {
|
||||
// pretty output must be created and written explicitly
|
||||
output, err := xml.MarshalIndent(v, " ", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
_, err = resp.Write([]byte(xml.Header))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = resp.Write(output)
|
||||
return err
|
||||
}
|
||||
// not-so-pretty
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
return xml.NewEncoder(resp).Encode(v)
|
||||
}
|
||||
|
||||
// entityJSONAccess is a EntityReaderWriter for JSON encoding
|
||||
type entityJSONAccess struct {
|
||||
// This is used for setting the Content-Type header when writing
|
||||
ContentType string
|
||||
}
|
||||
|
||||
// Read unmarshals the value from JSON
|
||||
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
|
||||
decoder := NewDecoder(req.Request.Body)
|
||||
decoder.UseNumber()
|
||||
return decoder.Decode(v)
|
||||
}
|
||||
|
||||
// Write marshals the value to JSON and sets the Content-Type Header.
|
||||
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
|
||||
return writeJSON(resp, status, e.ContentType, v)
|
||||
}
|
||||
|
||||
// writeJSON marshals the value to JSON and sets the Content-Type Header.
|
||||
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
|
||||
if v == nil {
|
||||
resp.WriteHeader(status)
|
||||
// do not write a nil representation
|
||||
return nil
|
||||
}
|
||||
if resp.prettyPrint {
|
||||
// pretty output must be created and written explicitly
|
||||
output, err := MarshalIndent(v, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
_, err = resp.Write(output)
|
||||
return err
|
||||
}
|
||||
// not-so-pretty
|
||||
resp.Header().Set(HEADER_ContentType, contentType)
|
||||
resp.WriteHeader(status)
|
||||
return NewEncoder(resp).Encode(v)
|
||||
}
|
35
vendor/github.com/emicklei/go-restful/filter.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
|
||||
type FilterChain struct {
|
||||
Filters []FilterFunction // ordered list of FilterFunction
|
||||
Index int // index into filters that is currently in progress
|
||||
Target RouteFunction // function to call after passing all filters
|
||||
}
|
||||
|
||||
// ProcessFilter passes the request,response pair to the next Filter in the chain, or to the Target when all Filters have run.
|
||||
// Each filter can decide to proceed to the next Filter or handle the Response itself.
|
||||
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
|
||||
if f.Index < len(f.Filters) {
|
||||
f.Index++
|
||||
f.Filters[f.Index-1](request, response, f)
|
||||
} else {
|
||||
f.Target(request, response)
|
||||
}
|
||||
}
|
||||
|
||||
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
|
||||
type FilterFunction func(*Request, *Response, *FilterChain)
|
||||
|
||||
// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
|
||||
// See examples/restful-no-cache-filter.go for usage
|
||||
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
|
||||
resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
|
||||
resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
|
||||
resp.Header().Set("Expires", "0") // Proxies.
|
||||
chain.ProcessFilter(req, resp)
|
||||
}
|
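// Usage sketch (illustrative, not part of the upstream file): NoBrowserCacheFilter can be
// installed per WebService or per Route, for example:
//
//	ws := new(WebService)
//	ws.Filter(NoBrowserCacheFilter)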
11
vendor/github.com/emicklei/go-restful/json.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
// +build !jsoniter
|
||||
|
||||
package restful
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
var (
|
||||
MarshalIndent = json.MarshalIndent
|
||||
NewDecoder = json.NewDecoder
|
||||
NewEncoder = json.NewEncoder
|
||||
)
|