Update go dependencies

Manuel Alejandro de Brito Fontes 2018-12-05 13:27:09 -03:00
parent 3f8c60ab7a
commit 011bc4b08e
1299 changed files with 71186 additions and 91183 deletions

Gopkg.lock

@ -2,15 +2,23 @@
[[projects]]
digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
digest = "1:5ad08b0e14866764a6d7475eb11c9cf05cad9a52c442593bdfa544703ff77f61"
name = "cloud.google.com/go"
packages = ["compute/metadata"]
pruneopts = "NUT"
revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374"
version = "v0.28.0"
revision = "74b12019e2aa53ec27882158f59192d7cd6d1998"
version = "v0.33.1"
[[projects]]
digest = "1:9fe4851c1eb1ab8c7486fee4e2d06db0e6509d6772211177e631c1abfb41b720"
digest = "1:f323f98930459f65f4699b5bfba563743680e2633d96e72d61a9732648e2d07c"
name = "contrib.go.opencensus.io/exporter/ocagent"
packages = ["."]
pruneopts = "NUT"
revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d"
version = "v0.2.0"
[[projects]]
digest = "1:fff60f8e65c264c3fe391e671cd84ce292a748c0a3fa19cdcef0cc34ffc123ae"
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
@ -18,19 +26,19 @@
"autorest/azure",
"autorest/date",
"logger",
"version",
"tracing",
]
pruneopts = "NUT"
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
version = "v10.15.5"
revision = "f401b1ccc8eb505927fae7a0c7f6406d37ca1c7e"
version = "v11.2.8"
[[projects]]
digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1"
digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406"
name = "github.com/Sirupsen/logrus"
packages = ["."]
pruneopts = "NUT"
revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d"
version = "v1.1.0"
revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
version = "v1.2.0"
[[projects]]
branch = "master"
@ -48,6 +56,19 @@
pruneopts = "NUT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:65b0d980b428a6ad4425f2df4cd5410edd81f044cf527bd1c345368444649e58"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/agent/trace/v1",
"gen-go/resource/v1",
"gen-go/trace/v1",
]
pruneopts = "NUT"
revision = "7f2434bc10da710debe5c4315ed6d4df454b4024"
version = "v0.1.0"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
@ -64,16 +85,6 @@
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
digest = "1:4189ee6a3844f555124d9d2656fe7af02fca961c2a9bad9074789df13a0c62e0"
name = "github.com/docker/distribution"
packages = [
"digestset",
"reference",
]
pruneopts = "NUT"
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
[[projects]]
digest = "1:4340101f42556a9cb2f7a360a0e95a019bfef6247d92e6c4c46f2433cf86a482"
name = "github.com/docker/go-units"
@ -84,14 +95,14 @@
[[projects]]
branch = "master"
digest = "1:da25cf063072a10461c19320e82117d85f9d60be4c95a62bc8d5a49acf7d0ca5"
digest = "1:eb8f1b1913bffd6e788deee9fe4ba3a4d83267aff6045d3be33105e35ece290b"
name = "github.com/docker/spdystream"
packages = [
".",
"spdy",
]
pruneopts = "NUT"
revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db"
revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85"
[[projects]]
branch = "master"
@ -109,6 +120,14 @@
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
name = "github.com/evanphx/json-patch"
packages = ["."]
pruneopts = "NUT"
revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
version = "v4.1.0"
[[projects]]
digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
name = "github.com/fsnotify/fsnotify"
@ -125,14 +144,6 @@
pruneopts = "NUT"
revision = "8306686428a5fe132eac8cb7c4848af725098bd4"
[[projects]]
digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
digest = "1:cd4f86461732066e277335465962660cbf02999e18d5bbb5e9285eac4608b970"
name = "github.com/gogo/protobuf"
@ -146,24 +157,16 @@
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
[[projects]]
branch = "master"
digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "NUT"
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
branch = "master"
digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8"
name = "github.com/golang/groupcache"
packages = ["lru"]
pruneopts = "NUT"
revision = "6f2cf27854a4a29e3811b0371547be335d411b8b"
revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa"
[[projects]]
digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62"
digest = "1:479e958ad7ae540d7a3c565d1839cc7c8ab9b627640144443f1e88d11d4023d0"
name = "github.com/golang/protobuf"
packages = [
"proto",
@ -171,6 +174,7 @@
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = "NUT"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
@ -193,12 +197,12 @@
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:a1578f7323eca2b88021fdc9a79a99833d40b12c32a5ea4f284e2fad19ea2657"
digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
name = "github.com/google/uuid"
packages = ["."]
pruneopts = "NUT"
revision = "d460ce9f8df2e77fb1ba55ca87fafed96c607494"
version = "v1.0.0"
revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
version = "v1.1.0"
[[projects]]
digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
@ -214,7 +218,7 @@
[[projects]]
branch = "master"
digest = "1:8ccbd0e8ef8b6e59b331991fcb5477fb5e6c51b7d1be09789d7dc0a33e95bf6a"
digest = "1:3f5485f5f0ea50de409c25414eaf4154bd54b2fa9ef03fc4a0a278967daac906"
name = "github.com/gophercloud/gophercloud"
packages = [
".",
@ -226,7 +230,7 @@
"pagination",
]
pruneopts = "NUT"
revision = "39db44929bf62867520de8607bd519217a78f802"
revision = "8f4eb476f72cbb430de821134208c549a643e99b"
[[projects]]
branch = "master"
@ -237,7 +241,7 @@
"diskcache",
]
pruneopts = "NUT"
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
revision = "c63ab54fda8f77302f8d414e19933f2b6026a089"
[[projects]]
digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
@ -281,12 +285,12 @@
version = "v1.1.5"
[[projects]]
branch = "master"
digest = "1:c8a452cc8dd4ef9f857570ce2be31ca257a0928bf3c2b08cd7e11972b985c6d7"
digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
pruneopts = "NUT"
revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5"
revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
version = "v1.0.1"
[[projects]]
branch = "master"
@ -325,11 +329,11 @@
[[projects]]
branch = "master"
digest = "1:5fe20cfe4ef484c237cec9f947b2a6fa90bad4b8610fd014f0e4211e13d82d5d"
digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = "NUT"
revision = "fa473d140ef3c6adf42d6b391fe76707f1f243c8"
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
[[projects]]
digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
@ -348,27 +352,47 @@
version = "1.0.1"
[[projects]]
branch = "master"
digest = "1:af70f88d68d35881112c13fa94ae3fcb53cfe14c4b8fb3d87a345bbf442d2747"
name = "github.com/moul/http2curl"
packages = ["."]
pruneopts = "NUT"
revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:7b782f694db508189ee26915bb12dcb74aefd5d96411ad8b04a12d880df1b810"
digest = "1:9e33629d4ec9e9344715a54fa0a107f23ce800deb13999b0190df04c3540ccb5"
name = "github.com/ncabatoff/go-seq"
packages = ["seq"]
pruneopts = "NUT"
revision = "b08ef85ed83364cba413c98a94bbd4169a0ce70b"
[[projects]]
branch = "master"
digest = "1:fcdc1a06529f364e1ba0e8a85540ae7ebbbe2e4b00d40245b24d1b8a3907b2e6"
name = "github.com/ncabatoff/process-exporter"
packages = [
".",
"proc",
]
pruneopts = "NUT"
revision = "5917bc766b95a1fa3c2ae85340f4de02a6b7e15e"
source = "github.com/aledbf/process-exporter"
revision = "bdf24ef23850ba2fc593f244256482375f7cbfcd"
[[projects]]
digest = "1:0be1cd4c73d5e22a30edcf32a18e9809a370a7a4a52c4f41a86070b34da93fef"
branch = "add-proc-status"
digest = "1:75f0f2e92ea523185b28b9377984503a7b2be7c7451831eca5478b20998a3799"
name = "github.com/ncabatoff/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs",
]
pruneopts = "NUT"
revision = "e1a38cb53622f65e073c5e750e6498a44ebfbd2a"
[[projects]]
digest = "1:cdc5cfc04dd0b98f86433207fe6d9879757c46734441a08886acc11251c7ed4a"
name = "github.com/onsi/ginkgo"
packages = [
".",
@ -391,11 +415,11 @@
"types",
]
pruneopts = "NUT"
revision = "3774a09d95489ccaa16032e0770d08ea77ba6184"
version = "v1.6.0"
revision = "2e1be8f7d90e9d3e3e58b0ce470f2f14d075406f"
version = "v1.7.0"
[[projects]]
digest = "1:95f40a9db820078d1795c7ba2d476016aca05dc4267eaf6752a925e437cb351f"
digest = "1:0db10c512b410a1ecd8845e31db52622c08b76198f7ab76afb25319c84a7fd4b"
name = "github.com/onsi/gomega"
packages = [
".",
@ -412,16 +436,8 @@
"types",
]
pruneopts = "NUT"
revision = "7615b9433f86a8bdf29709bf288bc4fd0636a369"
version = "v1.4.2"
[[projects]]
digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0"
name = "github.com/opencontainers/go-digest"
packages = ["."]
pruneopts = "NUT"
revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf"
version = "v1.0.0-rc1"
revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd"
version = "v1.4.3"
[[projects]]
digest = "1:c9e0e109a897ef306f865e55e07ecb1c3024edafd103e1c2b1a06a852dc91cb3"
@ -484,7 +500,7 @@
[[projects]]
branch = "master"
digest = "1:3b374d4b78e68dbab0cc07ce56e75d8b267ce1ac1de6c7b610191789abd3fee1"
digest = "1:69c96554af95dcf0afbabae18949708bd165a5c8393ee6e7299fc5a583f595a4"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
@ -492,7 +508,7 @@
"prometheus/promhttp",
]
pruneopts = "NUT"
revision = "2d5a6493f89f13c9779c2e097e0710e0f4478bae"
revision = "32b1bb4674c4db541df5b547bb1a325337522022"
[[projects]]
branch = "master"
@ -504,7 +520,7 @@
[[projects]]
branch = "master"
digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea"
name = "github.com/prometheus/common"
packages = [
"expfmt",
@ -512,11 +528,11 @@
"model",
]
pruneopts = "NUT"
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
revision = "4724e9255275ce38f7179b2478abeae4e28c904f"
[[projects]]
branch = "master"
digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0"
digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6"
name = "github.com/prometheus/procfs"
packages = [
".",
@ -525,7 +541,7 @@
"xfs",
]
pruneopts = "NUT"
revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2"
revision = "aa55a523dc0a8297edf51bb75e8eec13eb3be45d"
[[projects]]
digest = "1:330e9062b308ac597e28485699c02223bd052437a6eed32a173c9227dcb9d95a"
@ -539,12 +555,12 @@
version = "v1.1.2"
[[projects]]
digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
version = "v1.0.2"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
digest = "1:c10994a08ed2ff2cc7611d03ded8bb5f782096880b2daab391adbd9ab95a1764"
@ -555,21 +571,40 @@
version = "1.0.2"
[[projects]]
branch = "master"
digest = "1:8e241498e35f550e5192ee6b1f6ff2c0a7ffe81feff9541d297facffe1383979"
name = "golang.org/x/crypto"
digest = "1:f2805adeca595d7dbd25173b57f83daaa79f44d43475263c4e34b05020eac9a7"
name = "go.opencensus.io"
packages = [
"ed25519",
"ed25519/internal/edwards25519",
"pbkdf2",
"ssh/terminal",
".",
"exemplar",
"internal",
"internal/tagencoding",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"plugin/ochttp/propagation/tracecontext",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
]
pruneopts = "NUT"
revision = "5295e8364332db77d75fce11f1d19c053919a9c9"
revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b"
version = "v0.18.0"
[[projects]]
branch = "master"
digest = "1:f7468b77e2eb541c768de1f12e3ad98debb07f1d49409f40f49b898588eca448"
digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "eb0de9b17e854e9b1ccd9963efafc79862359959"
[[projects]]
branch = "master"
digest = "1:3441e889d2a118498de288fcfe0eb1726831a76c3285af5357674eb1cc150a0f"
name = "golang.org/x/net"
packages = [
"context",
@ -586,11 +621,11 @@
"trace",
]
pruneopts = "NUT"
revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de"
revision = "351d144fa1fc0bd934e2408202be0c29f25e35a0"
[[projects]]
branch = "master"
digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b"
digest = "1:169c27544e7f54a861a05cd84078b494eaf3e41543f0dfd4dd215fa32139cd40"
name = "golang.org/x/oauth2"
packages = [
".",
@ -600,18 +635,26 @@
"jwt",
]
pruneopts = "NUT"
revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
revision = "28207608b83849a028d4f12e46533a6b6894ecaf"
[[projects]]
branch = "master"
digest = "1:c8c9c630940822c796f2cdb5f7218d9710ea7d544f379813d8680f07590c2fee"
digest = "1:5e4d81c50cffcb124b899e4f3eabec3930c73532f0096c27f94476728ba03028"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = "NUT"
revision = "42b317875d0fa942474b76e1b46a6060d720ae6e"
[[projects]]
branch = "master"
digest = "1:6ddfd101211f81df3ba1f474baf1c451f7708f01c1e0c4be49cd9f0af03596cf"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "dad3d9fb7b6e83d0f9ac8f54670f6334c3a287b4"
revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31"
[[projects]]
digest = "1:8a12cbc891b7130d3f660f8a309e5c0b083f831e6ac38cdaa1f12e63c12d6bea"
@ -650,14 +693,22 @@
[[projects]]
branch = "master"
digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "NUT"
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
[[projects]]
digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
branch = "master"
digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = "NUT"
revision = "af4fc4062c262223ddc2d92f5f35a93690db383a"
[[projects]]
digest = "1:b63b351b57e64ae8aec1af030dc5e74a2676fcda62102f006ae4411fac1b04c8"
name = "google.golang.org/appengine"
packages = [
".",
@ -672,8 +723,8 @@
"urlfetch",
]
pruneopts = "NUT"
revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
version = "v1.2.0"
revision = "4a4468ece617fc8205e99368fa2200e9d1fad421"
version = "v1.3.0"
[[projects]]
branch = "master"
@ -681,10 +732,10 @@
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "NUT"
revision = "0e822944c569bf5c9afd034adaa56208bd2906ac"
revision = "31ac5d88444a9e7ad18077db9a165d793ad06a2e"
[[projects]]
digest = "1:5b805b8e03b29399b344655cac16873f026e54dc0a7c17b381f6f4d4c7b6d741"
digest = "1:2f91d3e11b666570f8c923912f1cc8cf2f0c6b7371b2687ee67a8f73f08c6272"
name = "google.golang.org/grpc"
packages = [
".",
@ -715,8 +766,8 @@
"tap",
]
pruneopts = "NUT"
revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1"
version = "v1.15.0"
revision = "2e463a05d100327ca47ac218281906921038fd95"
version = "v1.16.0"
[[projects]]
digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
@ -751,19 +802,6 @@
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
digest = "1:812f9446bc99ebd1c66c55fa456ff7843f7105d22f11f0a2098bced37e9c6d32"
name = "gopkg.in/square/go-jose.v2"
packages = [
".",
"cipher",
"json",
"jwt",
]
pruneopts = "NUT"
revision = "ef984e69dd356202fd4e4910d4d9c24468bdf0b8"
version = "v2.1.9"
[[projects]]
branch = "v1"
digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1"
@ -773,15 +811,15 @@
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
[[projects]]
digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "NUT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.2"
[[projects]]
digest = "1:c6b0cfc418f5e8bb9169d78f1024cb1c9f3e61c9a76235134d26d3f28ecdf27b"
digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
@ -789,6 +827,7 @@
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"auditregistration/v1alpha1",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
@ -817,10 +856,10 @@
"storage/v1beta1",
]
pruneopts = "NUT"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[projects]]
digest = "1:6863750b53ac3e57c4ea2b068c6c07d3b42a6dc965f64c5fdd56ae2ab768deb5"
digest = "1:501a73762f1b2c4530206ffb657b39d8b58a9b40280d30e4509ae1232767962c"
name = "k8s.io/apiextensions-apiserver"
packages = [
"pkg/apis/apiextensions",
@ -828,24 +867,20 @@
"pkg/client/clientset/clientset",
"pkg/client/clientset/clientset/scheme",
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
"pkg/features",
]
pruneopts = "NUT"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[projects]]
digest = "1:f48552d381c18aebe56e96a8b0478430def42a30ac20945f769eb90ada979b52"
digest = "1:692e27ed8a5eb2d74bde52d323d428814cd9a6e0f726d02ffd60fda7819e1ee7"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/equality",
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/api/validation",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1/validation",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
@ -873,7 +908,6 @@
"pkg/util/mergepatch",
"pkg/util/naming",
"pkg/util/net",
"pkg/util/rand",
"pkg/util/remotecommand",
"pkg/util/runtime",
"pkg/util/sets",
@ -890,25 +924,20 @@
"third_party/forked/golang/reflect",
]
pruneopts = "NUT"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[projects]]
digest = "1:8eaab7022d2018a1e398e049328f9ae6c35812bb3373441194f12d916d3b8140"
digest = "1:cc0487260dc4ffb2b513273ad8438497b8df2d8c0de90aaf03d22cc5b58e3fe1"
name = "k8s.io/apiserver"
packages = [
"pkg/authentication/authenticator",
"pkg/authentication/serviceaccount",
"pkg/authentication/user",
"pkg/features",
"pkg/server/healthz",
"pkg/util/feature",
"pkg/util/logs",
]
pruneopts = "NUT"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[projects]]
digest = "1:8e32eb6edca8a05b29222291da9447034c71d7524705b28db7d34366fb393b3c"
digest = "1:14961132526c5e588ccfa30efd6c977db308c1b1fb83ad4043c3a92c961521ae"
name = "k8s.io/client-go"
packages = [
"discovery",
@ -921,6 +950,8 @@
"informers/apps/v1",
"informers/apps/v1beta1",
"informers/apps/v1beta2",
"informers/auditregistration",
"informers/auditregistration/v1alpha1",
"informers/autoscaling",
"informers/autoscaling/v1",
"informers/autoscaling/v2beta1",
@ -970,6 +1001,8 @@
"kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/auditregistration/v1alpha1",
"kubernetes/typed/auditregistration/v1alpha1/fake",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
@ -1027,6 +1060,7 @@
"listers/apps/v1",
"listers/apps/v1beta1",
"listers/apps/v1beta2",
"listers/auditregistration/v1alpha1",
"listers/autoscaling/v1",
"listers/autoscaling/v2beta1",
"listers/autoscaling/v2beta2",
@ -1076,12 +1110,10 @@
"tools/record",
"tools/reference",
"tools/remotecommand",
"tools/watch",
"transport",
"transport/spdy",
"util/buffer",
"util/cert",
"util/cert/triple",
"util/connrotation",
"util/exec",
"util/flowcontrol",
@ -1092,11 +1124,19 @@
"util/workqueue",
]
pruneopts = "NUT"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[projects]]
branch = "master"
digest = "1:9ca673cabdf1a203b841f27246e7f1211a6839a251bcd34c47d90a8e170fcf27"
digest = "1:aac84f18a5f8ef84b0048e4d8856521ef3d33cd50fd839326fa92befcd92bfd4"
name = "k8s.io/cloud-provider"
packages = ["."]
pruneopts = "NUT"
revision = "9b77dc1c384685cb732b3025ed5689dd597a5971"
[[projects]]
branch = "master"
digest = "1:741fc393d821a1acb7f8c85cd3cf8675f2c7f08a47096c4ca7ef6229f5acc763"
name = "k8s.io/csi-api"
packages = [
"pkg/apis/csi/v1alpha1",
@ -1105,83 +1145,64 @@
"pkg/client/clientset/versioned/typed/csi/v1alpha1",
]
pruneopts = "NUT"
revision = "31ae05d8096db803f5b4ff16cda6059c0a9cc861"
revision = "61a1735c3f5028c6dd5dc37e0f5573d8872507ba"
[[projects]]
digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b"
name = "k8s.io/klog"
packages = ["."]
pruneopts = "NUT"
revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
version = "v0.1.0"
[[projects]]
branch = "master"
digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54"
digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
pruneopts = "NUT"
revision = "e3762e86a74c878ffed47484592986685639c2cd"
revision = "0317810137be915b9cf888946c6e115c1bfac693"
[[projects]]
digest = "1:75158cef6a899b0b047de1cb2b3a585eca459e64e797f99d89261b92674b8c2b"
digest = "1:df9bd8d59539e980d5d565b00a6d45e445e62d8c89d60fe0fa520ad6e54d32e4"
name = "k8s.io/kubernetes"
packages = [
"pkg/api/legacyscheme",
"pkg/api/service",
"pkg/api/v1/pod",
"pkg/apis/autoscaling",
"pkg/apis/core",
"pkg/apis/core/helper",
"pkg/apis/core/install",
"pkg/apis/core/pods",
"pkg/apis/core/v1",
"pkg/apis/core/v1/helper",
"pkg/apis/core/validation",
"pkg/apis/extensions",
"pkg/apis/networking",
"pkg/apis/policy",
"pkg/apis/scheduling",
"pkg/capabilities",
"pkg/cloudprovider",
"pkg/controller",
"pkg/features",
"pkg/fieldpath",
"pkg/kubelet/apis",
"pkg/kubelet/apis/cri/runtime/v1alpha2",
"pkg/kubelet/container",
"pkg/kubelet/types",
"pkg/kubelet/util/format",
"pkg/kubelet/util/sliceutils",
"pkg/master/ports",
"pkg/scheduler/algorithm",
"pkg/scheduler/algorithm/priorities/util",
"pkg/scheduler/api",
"pkg/scheduler/cache",
"pkg/scheduler/util",
"pkg/security/apparmor",
"pkg/serviceaccount",
"pkg/util/file",
"pkg/util/filesystem",
"pkg/util/hash",
"pkg/util/io",
"pkg/util/mount",
"pkg/util/net/sets",
"pkg/util/node",
"pkg/util/nsenter",
"pkg/util/parsers",
"pkg/util/sysctl",
"pkg/util/taints",
"pkg/volume",
"pkg/volume/util/fs",
"pkg/volume/util/recyclerclient",
"third_party/forked/golang/expansion",
]
pruneopts = "NUT"
revision = "v1.12.1"
revision = "v1.13.0-rc.2"
[[projects]]
branch = "master"
digest = "1:b8bb2923aa316490408300d029eeb4f566d54269e91242eeef680c70c2a1c041"
digest = "1:381323c2fe2e890a3dd3b5d6dc6f2199068408cca89b24f6b7ca1c60f32644a5"
name = "k8s.io/utils"
packages = [
"exec",
"pointer",
]
packages = ["exec"]
pruneopts = "NUT"
revision = "cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb"
revision = "0d26856f57b32ec3398579285e5c8a2bfe8c5243"
[[projects]]
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
name = "sigs.k8s.io/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
version = "v1.1.0"
[solve-meta]
analyzer-name = "dep"
@ -1189,7 +1210,6 @@
input-imports = [
"github.com/armon/go-proxyproto",
"github.com/eapache/channels",
"github.com/golang/glog",
"github.com/imdario/mergo",
"github.com/json-iterator/go",
"github.com/kylelemons/godebug/pretty",
@ -1221,6 +1241,7 @@
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/fields",
"k8s.io/apimachinery/pkg/labels",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/apimachinery/pkg/runtime/schema",
"k8s.io/apimachinery/pkg/util/intstr",
"k8s.io/apimachinery/pkg/util/runtime",
@ -1228,6 +1249,7 @@
"k8s.io/apimachinery/pkg/util/uuid",
"k8s.io/apimachinery/pkg/util/wait",
"k8s.io/apimachinery/pkg/version",
"k8s.io/apimachinery/pkg/watch",
"k8s.io/apiserver/pkg/server/healthz",
"k8s.io/apiserver/pkg/util/logs",
"k8s.io/client-go/informers",
@ -1244,9 +1266,9 @@
"k8s.io/client-go/tools/leaderelection/resourcelock",
"k8s.io/client-go/tools/record",
"k8s.io/client-go/util/cert",
"k8s.io/client-go/util/cert/triple",
"k8s.io/client-go/util/flowcontrol",
"k8s.io/client-go/util/workqueue",
"k8s.io/klog",
"k8s.io/kubernetes/pkg/api/v1/pod",
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils",
"k8s.io/kubernetes/pkg/util/filesystem",

Gopkg.toml

@ -39,6 +39,10 @@
name = "gopkg.in/fsnotify.v1"
source = "https://github.com/fsnotify/fsnotify.git"
[[override]]
name = "github.com/golang/glog"
source = "k8s.io/klog/glog"
[[constraint]]
name = "github.com/eapache/channels"
branch = "master"
@ -47,10 +51,6 @@
branch = "master"
name = "github.com/armon/go-proxyproto"
[[constraint]]
branch = "master"
name = "github.com/golang/glog"
[[constraint]]
name = "github.com/imdario/mergo"
version = "0.2.4"
@ -69,7 +69,6 @@
[[constraint]]
name = "github.com/ncabatoff/process-exporter"
source = "github.com/aledbf/process-exporter"
branch = "master"
[[constraint]]
@ -84,10 +83,6 @@
name = "github.com/prometheus/client_golang"
branch = "master"
[[constraint]]
name = "github.com/spf13/pflag"
version = "1.0.0"
[[constraint]]
name = "github.com/zakjan/cert-chain-resolver"
version = "1.0.1"
@ -102,24 +97,24 @@
[[constraint]]
name = "k8s.io/kubernetes"
revision = "v1.12.1"
revision = "v1.13.0-rc.2"
[[constraint]]
name = "k8s.io/api"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[constraint]]
name = "k8s.io/apimachinery"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[constraint]]
name = "k8s.io/client-go"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[constraint]]
name = "k8s.io/apiextensions-apiserver"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"
[[constraint]]
name = "k8s.io/apiserver"
revision = "kubernetes-1.12.1"
revision = "kubernetes-1.13.0-rc.2"

vendor/cloud.google.com/go/.travis.yml

@ -1,25 +0,0 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d
- tar xvf keys.tar
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
GCLOUD_TESTS_GOLANG_KEYRING="projects/dulcet-port-762/locations/us/keyRings/go-integration-test"
GCLOUD_TESTS_GOLANG_ENABLE_REPLAY=yes
travis_wait ./run-tests.sh $TRAVIS_COMMIT
env:
matrix:
# The GCLOUD_TESTS_API_KEY environment variable.
secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI=

vendor/cloud.google.com/go/CHANGES.md

@ -1,5 +1,108 @@
# Changes
## v0.33.1
all: release v0.33.1
- compute: Removes an erroneously added go.mod.
- logging: Populate source location in fromLogEntry.
## v0.33.0
- bttest:
- Add support for apply_label_transformer.
- expr:
- Add expr library.
- firestore:
- Support retrieval of missing documents.
- kms:
- Add IAM methods.
- pubsub:
- Clarify extension documentation.
- scheduler:
- Add v1beta1 client.
- vision:
- Add product search helper.
- Add new product search client.
## v0.32.0
Note: This release is the last to support Go 1.6 and 1.8.
- bigquery:
- Add support for removing an expiration.
- Ignore NeverExpire in Table.Create.
- Validate table expiration time.
- cbt:
- Add note about not supporting arbitrary bytes.
- datastore:
- Align key checks.
- firestore:
- Return an error when using Start/End without providing values.
- pubsub:
- Add pstest Close method.
- Clarify MaxExtension documentation.
- securitycenter:
- Add v1beta1 client.
- spanner:
- Allow nil in mutations.
- Improve doc of SessionPoolConfig.MaxOpened.
- Increase session deletion timeout from 5s to 15s.
## v0.31.0
- bigtable:
- Group mutations across multiple requests.
- bigquery:
- Link to bigquery troubleshooting errors page in bigquery.Error comment.
- cbt:
- Fix go generate command.
- Document usage of both maxage + maxversions.
- datastore:
- Passing nil keys results in ErrInvalidKey.
- firestore:
- Clarify what Document.DataTo does with untouched struct fields.
- profile:
- Validate service name in agent.
- pubsub:
- Fix deadlock with pstest and ctx.Cancel.
- Fix a possible deadlock in pstest.
- trace:
- Update doc URL with new fragment.
Special thanks to @fastest963 for going above and beyond helping us to debug
hard-to-reproduce Pub/Sub issues.
## v0.30.0
- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information.
- bigtable: bttest supports row sample filter.
- functions: metadata package added for accessing Cloud Functions resource metadata.
## v0.29.0
- bigtable:
- Add retry to all idempotent RPCs.
- cbt supports complex GC policies.
- Emulator supports arbitrary bytes in regex filters.
- firestore: Add ArrayUnion and ArrayRemove.
- logging: Add the ContextFunc option to supply the context used for
asynchronous RPCs.
- profiler: Ignore NotDefinedError when fetching the instance name
- pubsub:
- BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled.
- BEHAVIOR CHANGE: Receive retries on Unavailable instead of returning.
- Fix deadlock.
- Restore Ack/Nack/Modacks metrics.
- Improve context handling in iterator.
- Implement synchronous mode for Receive.
- pstest: add Pull.
- spanner: Add a metric for the number of sessions currently opened.
- storage:
- Canceling the context releases all resources.
- Add additional RetentionPolicy attributes.
- vision/apiv1: Add LocalizeObjects method.
## v0.28.0
- bigtable:

vendor/cloud.google.com/go/CONTRIBUTING.md

@ -1,117 +1,177 @@
# Contributing
1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing
tool.
1. You will need to ensure that your `GOBIN` directory (by default
`$GOPATH/bin`) is in your `PATH` so that git can find the command.
`$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. If you would like, you may want to set up aliases for git-codereview,
such that `git codereview change` becomes `git change`. See the
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
such that `git codereview change` becomes `git change`. See the
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
1. Should you run into issues with the git-codereview tool, please note
that all error messages will assume that you have set up these
aliases.
that all error messages will assume that you have set up these aliases.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git
origin is https://code.googlesource.com/gocloud:
1. If you have already checked out the source, make sure that the remote
git origin is https://code.googlesource.com/gocloud:
```
git remote set-url origin https://code.googlesource.com/gocloud
```
git remote set-url origin https://code.googlesource.com/gocloud
1. Make sure your auth is configured correctly by visiting
https://code.googlesource.com, clicking "Generate Password", and following
the directions.
https://code.googlesource.com, clicking "Generate Password", and following the
directions.
1. Make changes and create a change by running `git codereview change <name>`,
provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending to the change with `git codereview change` and mail as your receive
feedback. Each new mailed amendment will create a new patch set for your change in Gerrit.
1. Keep amending to the change with `git codereview change` and mail as you
receive feedback. Each new mailed amendment will create a new patch set for
your change in Gerrit.
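In practice the review loop from the steps above looks like this (the change name `my-fix` is illustrative):

``` sh
$ go get golang.org/x/review/git-codereview   # install the review tool
$ git codereview change my-fix                # create a change
$ git codereview mail                         # mail it as a Gerrit CL
# ...address reviewer feedback...
$ git codereview change                       # amend the same change
$ git codereview mail                         # mails a new patch set
```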
## Integration Tests
In addition to the unit tests, you may run the integration test suite.
In addition to the unit tests, you may run the integration test suite. These
directions describe setting up your environment to run integration tests for
_all_ packages: note that many of these instructions may be redundant if you
intend only to run integration tests on a single package.
To run the integrations tests, creating and configuration of a project in the
Google Developers Console is required.
#### GCP Setup
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
Ensure the project-level **Owner**
[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the
service account. Alternatively, the account can be granted all of the following roles:
- **Editor**
- **Logs Configuration Writer**
- **PubSub Admin**
To run the integration tests, creation and configuration of two projects in
the Google Developers Console are required: one specifically for Firestore
integration tests, and another for all other integration tests. We'll refer to
these projects as "general project" and "Firestore project".
Once you create a project, set the following environment variables to be able to
run the against the actual APIs.
After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
for each project. Ensure the project-level **Owner**
[IAM role](console.cloud.google.com/iam-admin/iam/project) is added to
each service account. During the creation of the service account, you should
download the JSON credential file for use later.
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
Next, ensure the following APIs are enabled in the general project:
Some packages require additional environment variables to be set:
- BigQuery API
- BigQuery Data Transfer API
- Cloud Dataproc API
- Cloud Dataproc Control API Private
- Cloud Datastore API
- Cloud Firestore API
- Cloud Key Management Service (KMS) API
- Cloud Natural Language API
- Cloud OS Login API
- Cloud Pub/Sub API
- Cloud Resource Manager API
- Cloud Spanner API
- Cloud Speech API
- Cloud Translation API
- Cloud Video Intelligence API
- Cloud Vision API
- Compute Engine API
- Compute Engine Instance Group Manager API
- Container Registry API
- Firebase Rules API
- Google Cloud APIs
- Google Cloud Deployment Manager V2 API
- Google Cloud SQL
- Google Cloud Storage
- Google Cloud Storage JSON API
- Google Compute Engine Instance Group Updater API
- Google Compute Engine Instance Groups API
- Kubernetes Engine API
- Stackdriver Error Reporting API
- firestore
- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: project ID for Firestore.
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
- storage
- **GCLOUD_TESTS_GOLANG_KEYRING**: The full name of the keyring for the tests, in the
form "projects/P/locations/L/keyRings/R".
- translate
- **GCLOUD_TESTS_API_KEY**: API key for using the Translate API.
- profiler
- **GCLOUD_TESTS_GOLANG_ZONE**: Compute Engine zone.
Next, create a Datastore database in the general project, and a Firestore
database in the Firestore project.
Some packages can record the RPCs during integration tests to a file for
subsequent replay. To record, pass the `-record` flag to `go test`. The
recording will be saved to the _package_`.replay` file. To replay integration
tests from a saved recording, the replay file must be present, the `-short` flag
must be passed to `go test`, and the **GCLOUD_TESTS_GOLANG_ENABLE_REPLAY**
environment variable must have a non-empty value.
Finally, in the general project, create an API key for the translate API:
Install the [gcloud command-line tool][gcloudcli] to your machine and use it
to create some resources used in integration tests.
- Go to GCP Developer Console.
- Navigate to APIs & Services > Credentials.
- Click Create Credentials > API Key.
- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
#### Local Setup
Once the two projects are created and configured, set the following environment
variables:
- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
bamboo-shift-455) for the general project.
- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
project's service account.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
(e.g. doorway-cliff-677) for the Firestore project.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
Firestore project's service account.
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
in the form
"projects/P/locations/L/keyRings/R". The creation of this is described below.
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
create some resources used in integration tests.
From the project's root directory:
``` sh
# Set the default project in your env.
# Sets the default project in your env.
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
# Authenticate the gcloud tool with your account.
# Authenticates the gcloud tool with your account.
$ gcloud auth login
# Create the indexes used in the datastore integration tests.
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
$ gcloud datastore create-indexes datastore/testdata/index.yaml
# Create a Google Cloud storage bucket with the same name as your test project,
# Creates a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink
# integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
# Create a PubSub topic for integration tests of storage notifications.
# Creates a PubSub topic for integration tests of storage notifications.
$ gcloud beta pubsub topics create go-storage-notification-test
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
# "service-<numberic project id>@gs-project-accounts.iam.gserviceaccount.com"
# as a publisher to that topic.
# Create a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
# the instance after testing with 'gcloud beta spanner instances delete'.
# Creates a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to
# delete the instance after testing with 'gcloud beta spanner instances delete'.
# For Storage integration tests:
# Enable KMS for your project in the Cloud Console.
# Create a KMS keyring, in the same location as the default location for your project's buckets.
$ gcloud kms keyrings create MY_KEYRING --location MY_LOCATION
# Create two keys in the keyring, named key1 and key2.
$ gcloud kms keys create key1 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
$ gcloud kms keys create key2 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
# As mentioned above, set the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/MY_LOCATION/keyRings/MY_KEYRING
# Authorize Google Cloud Storage to encrypt and decrypt using key1.
$ export MY_KEYRING=some-keyring-name
$ export MY_LOCATION=global
# Creates a KMS keyring, in the same location as the default location for your
# project's buckets.
$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
# Creates two keys in the keyring, named key1 and key2.
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
```
Once you've done the necessary setup, you can run the integration tests by running:
#### Running
Once you've done the necessary setup, you can run the integration tests by
running:
``` sh
$ go test -v cloud.google.com/go/...
```
#### Replay
Some packages can record the RPCs during integration tests to a file for
subsequent replay. To record, pass the `-record` flag to `go test`. The
recording will be saved to the _package_`.replay` file. To replay integration
tests from a saved recording, the replay file must be present, the `-short`
flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
environment variable must have a non-empty value.
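For example, to record and then replay a single package's integration tests (the package choice is illustrative):

``` sh
# Record RPCs while the integration tests run; saves to <package>.replay.
$ go test -record cloud.google.com/go/datastore
# Replay from the saved recording instead of calling the real APIs.
$ GCLOUD_TESTS_GOLANG_ENABLE_REPLAY=yes go test -short cloud.google.com/go/datastore
```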
## Contributor License Agreements
Before we can accept your pull requests you'll need to sign a Contributor

vendor/cloud.google.com/go/README.md

@ -54,6 +54,7 @@ Google API | Status | Package
[Asset][cloud-asset] | alpha | [`godoc.org/cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
[BigQuery][cloud-bigquery] | stable | [`godoc.org/cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Bigtable][cloud-bigtable] | stable | [`godoc.org/cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[Cloudtasks][cloud-tasks] | beta | [`godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref]
[Container][cloud-container] | stable | [`godoc.org/cloud.google.com/go/container/apiv1`][cloud-container-ref]
[ContainerAnalysis][cloud-containeranalysis] | beta | [`godoc.org/cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
[Dataproc][cloud-dataproc] | stable | [`godoc.org/cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
@ -501,3 +502,6 @@ for more information.
[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1
[cloud-tasks]: https://cloud.google.com/tasks/
[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3

vendor/cloud.google.com/go/RELEASING.md

@ -1,13 +1,47 @@
# How to Release this Repo
# How to Create a New Release
1. Determine the current release version with `git tag -l`. It should look
something like `vX.Y.Z`. We'll call the current
version `$CV` and the new version `$NV`.
1. On master, run `git log $CV..` to list all the changes since the last
release.
1. Edit `CHANGES.md` to include a summary of the changes.
1. Mail the CL containing the `CHANGES.md` changes. When the CL is approved, submit it.
1. Without submitting any other CLs:
a. Switch to master.
b. Tag the repo with the next version: `git tag $NV`.
c. Push the tag: `git push origin $NV`.
## Prerequisites
Install [releasetool](https://github.com/googleapis/releasetool).
## Create a release
1. `cd` into the root directory, e.g., `~/go/src/cloud.google.com/go`
1. Checkout the master branch and ensure a clean and up-to-date state.
```
git checkout master
git pull --tags origin master
```
1. Run releasetool to generate a changelog from the last version. Note that
releasetool will prompt you to indicate whether the new version is a major,
minor, or patch version.
```
releasetool start --language go
```
1. Format the output to match CHANGES.md.
1. Submit a CL with the changes in CHANGES.md. The commit message should look
like this (where `v0.31.0` is instead the correct version number):
```
all: Release v0.31.0
```
1. Wait for approval from all reviewers and then submit the CL.
1. Return to the master branch and pull the release commit.
```
git checkout master
git pull origin master
```
1. Tag the current commit with the new version (e.g., `v0.31.0`)
```
releasetool tag --language go
```
1. Publish the tag to GoogleSource (i.e., origin):
```
git push origin $NEW_VERSION
```
1. Visit the [releases page][releases] on GitHub and click the "Draft a new
release" button. For tag version, enter the tag published in the previous
step. For the release title, use the version (e.g., `v0.31.0`). For the
description, copy the changes added to CHANGES.md.
[releases]: https://github.com/GoogleCloudPlatform/google-cloud-go/releases

vendor/cloud.google.com/go/compute/metadata/metadata.go

@ -20,6 +20,7 @@
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -31,9 +32,6 @@ import (
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
@ -143,7 +141,7 @@ func testOnGCE() bool {
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
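The second hunk drops the `golang.org/x/net/context/ctxhttp` helper in favor of the standard library's request-scoped context. A self-contained sketch of the same pattern (URL and timeout are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "http://metadata.google.internal", nil)
	if err != nil {
		panic(err)
	}
	// Before: res, err := ctxhttp.Do(ctx, client, req)
	// After:  attach the context to the request and use the client directly.
	res, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}
```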

vendor/cloud.google.com/go/regen-gapic.sh

@ -23,6 +23,7 @@
set -ex
APIS=(
google/api/expr/artman_cel.yaml
google/iam/artman_iam_admin.yaml
google/cloud/asset/artman_cloudasset_v1beta1.yaml
google/iam/credentials/artman_iamcredentials_v1.yaml
@ -37,6 +38,8 @@ google/cloud/oslogin/artman_oslogin_v1.yaml
google/cloud/oslogin/artman_oslogin_v1beta.yaml
google/cloud/redis/artman_redis_v1beta1.yaml
google/cloud/redis/artman_redis_v1.yaml
google/cloud/scheduler/artman_cloudscheduler_v1beta1.yaml
google/cloud/securitycenter/artman_securitycenter_v1beta1.yaml
google/cloud/speech/artman_speech_v1.yaml
google/cloud/speech/artman_speech_v1p1beta1.yaml
google/cloud/tasks/artman_cloudtasks_v2beta2.yaml
@ -47,7 +50,6 @@ google/cloud/videointelligence/artman_videointelligence_v1beta1.yaml
google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml
google/cloud/vision/artman_vision_v1.yaml
google/cloud/vision/artman_vision_v1p1beta1.yaml
google/container/artman_container.yaml
google/devtools/artman_clouddebugger.yaml
google/devtools/clouderrorreporting/artman_errorreporting.yaml
google/devtools/cloudtrace/artman_cloudtrace_v1.yaml
@ -70,6 +72,13 @@ for api in "${APIS[@]}"; do
cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/
done
# NOTE(pongad): `sed -i` doesn't work on Macs, because -i option needs an argument.
# `-i ''` doesn't work on GNU, since the empty string is treated as a file name.
# So we just create the backup and delete it after.
ver=$(date +%Y%m%d)
find $GOPATH/src/cloud.google.com/go/ -name '*.go' -exec sed -i.backup -e "s/^const versionClient.*/const versionClient = \"$ver\"/" '{}' +
find $GOPATH/src/cloud.google.com/go/ -name '*.backup' -delete
#go list cloud.google.com/go/... | grep apiv | xargs go test
#go test -short cloud.google.com/go/...

vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml

@ -0,0 +1,17 @@
language: go
go:
- 1.10.x
go_import_path: contrib.go.opencensus.io/exporter/ocagent
before_script:
- GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
- PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
script:
- go build ./... # Ensure dependency updates don't break build
- if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
- go vet ./...
- go test -v -race $PKGS # Run all the tests with the race detector enabled
- 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'

vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md

@ -0,0 +1,24 @@
# How to contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult [GitHub Help] for more
information on using pull requests.
[GitHub Help]: https://help.github.com/articles/about-pull-requests/

vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE

@ -1,7 +1,6 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@ -176,13 +175,24 @@
END OF TERMS AND CONDITIONS
Copyright 2016 Docker, Inc.
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

vendor/contrib.go.opencensus.io/exporter/ocagent/README.md

@ -0,0 +1,61 @@
# OpenCensus Agent Go Exporter
[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
OC-Agent is a daemon process running in a VM that can retrieve spans/stats/metrics from
the OpenCensus library, export them to other backends, and possibly push configurations back to
the library. See more details in the [OC-Agent Readme][OCAgentReadme].
Note: This is an experimental repository and is likely to get backwards-incompatible changes.
Ultimately we may want to move the OC-Agent Go Exporter to the [OpenCensus Go core library][OpenCensusGo].
## Installation
```bash
$ go get -u contrib.go.opencensus.io/exporter/ocagent/v1
```
## Usage
```go
import (
"context"
"fmt"
"log"
"time"
"contrib.go.opencensus.io/exporter/ocagent/v1"
"go.opencensus.io/trace"
)
func Example() {
exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
if err != nil {
log.Fatalf("Failed to create the agent exporter: %v", err)
}
defer exp.Stop()
// Now register it as a trace exporter.
trace.RegisterExporter(exp)
// Then use the OpenCensus tracing library, like we normally would.
ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
defer span.End()
for i := 0; i < 10; i++ {
_, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
<-time.After(6 * time.Millisecond)
iSpan.End()
}
}
```
[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent


@ -0,0 +1,38 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"math/rand"
"time"
)
var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
// nTriesWithExponentialBackoff retries fn up to nTries times, returning nil
// as soon as fn succeeds. Between attempts it sleeps for (1<<i)*timeBaseUnit
// plus a pseudo-random jitter of up to 100 microseconds.
func nTriesWithExponentialBackoff(nTries int64, timeBaseUnit time.Duration, fn func() error) (err error) {
for i := int64(0); i < nTries; i++ {
err = fn()
if err == nil {
return nil
}
// Backoff for a time period with a pseudo-random jitter
jitter := time.Duration(randSrc.Float64()*100) * time.Microsecond
ts := jitter + ((1 << uint64(i)) * timeBaseUnit)
<-time.After(ts)
}
return err
}
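A hedged usage sketch of the helper above; the caller sits in the same package (the helper is unexported), and the flaky operation is a stand-in:

```go
package ocagent

import (
	"errors"
	"time"
)

// exportWithRetries is a hypothetical caller: it retries a stand-in operation
// up to 5 times with backoff in units of 100ms, so the sleeps between
// attempts are jitter+100ms, jitter+200ms, jitter+400ms, ...
func exportWithRetries() error {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // fails twice, then succeeds
		}
		return nil
	}
	return nTriesWithExponentialBackoff(5, 100*time.Millisecond, op)
}
```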

View file

@ -0,0 +1,9 @@
module contrib.go.opencensus.io/exporter/ocagent
require (
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a
github.com/golang/protobuf v1.2.0
go.opencensus.io v0.17.0
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf
google.golang.org/grpc v1.15.0
)

View file

@ -0,0 +1,44 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/census-instrumentation/opencensus-proto v0.0.1 h1:4v5I+ax5jCmwTYVaWQacX8ZSxvUZemBX4UwBGSkDeoA=
github.com/census-instrumentation/opencensus-proto v0.0.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a h1:t88pXOTS5K+pjfuhTOcul6sdC4khgqB8ukyfbe62Zxo=
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
go.opencensus.io v0.17.0 h1:2Cu88MYg+1LU+WVD+NWwYhyP0kKgRlN9QjWGaX0jKTE=
go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.15.0 h1:Az/KuahOM4NAidTEuJCv/RonAA7rYsTPkqXVjr+8OOw=
google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View file

@ -0,0 +1,41 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"os"
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
"go.opencensus.io"
)
func createNodeInfo(nodeName string) *commonpb.Node {
return &commonpb.Node{
Identifier: &commonpb.ProcessIdentifier{
HostName: os.Getenv("HOSTNAME"),
Pid: uint32(os.Getpid()),
StartTimestamp: timeToTimestamp(startTime),
},
LibraryInfo: &commonpb.LibraryInfo{
Language: commonpb.LibraryInfo_GO_LANG,
ExporterVersion: Version,
CoreLibraryVersion: opencensus.Version(),
},
ServiceInfo: &commonpb.ServiceInfo{
Name: nodeName,
},
Attributes: make(map[string]string),
}
}

View file

@ -0,0 +1,299 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"context"
"errors"
"fmt"
"sync"
"time"
"google.golang.org/api/support/bundler"
"google.golang.org/grpc"
"go.opencensus.io/trace"
agentcommonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
)
var startupMu sync.Mutex
var startTime time.Time
func init() {
startupMu.Lock()
startTime = time.Now()
startupMu.Unlock()
}
var _ trace.Exporter = (*Exporter)(nil)
type Exporter struct {
// mu protects the non-atomic and non-channel variables
mu sync.RWMutex
started bool
stopped bool
agentPort uint16
agentAddress string
serviceName string
canDialInsecure bool
traceSvcClient agenttracepb.TraceServiceClient
traceExporter agenttracepb.TraceService_ExportClient
nodeInfo *agentcommonpb.Node
grpcClientConn *grpc.ClientConn
traceBundler *bundler.Bundler
}
func NewExporter(opts ...ExporterOption) (*Exporter, error) {
exp, err := NewUnstartedExporter(opts...)
if err != nil {
return nil, err
}
if err := exp.Start(); err != nil {
return nil, err
}
return exp, nil
}
const spanDataBufferSize = 300
func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) {
e := new(Exporter)
for _, opt := range opts {
opt.withExporter(e)
}
if e.agentPort <= 0 {
e.agentPort = DefaultAgentPort
}
traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
e.uploadTraces(bundle.([]*trace.SpanData))
})
traceBundler.DelayThreshold = 2 * time.Second
traceBundler.BundleCountThreshold = spanDataBufferSize
e.traceBundler = traceBundler
e.nodeInfo = createNodeInfo(e.serviceName)
return e, nil
}
const (
maxInitialConfigRetries = 10
maxInitialTracesRetries = 10
)
// Start dials to the agent, establishing a connection to it. It also
// initiates the Config and Trace services by sending over the initial
// messages that consist of the node identifier. Start makes a best-effort
// attempt to send the initial messages, applying exponential
// backoff at most 10 times.
func (ae *Exporter) Start() error {
ae.mu.Lock()
defer ae.mu.Unlock()
err := ae.doStartLocked()
if err == nil {
ae.started = true
return nil
}
// Otherwise we have an error and should clean up to avoid leaking resources.
ae.started = false
if ae.grpcClientConn != nil {
ae.grpcClientConn.Close()
}
return err
}
func (ae *Exporter) prepareAgentAddress() string {
if ae.agentAddress != "" {
return ae.agentAddress
}
port := DefaultAgentPort
if ae.agentPort > 0 {
port = ae.agentPort
}
return fmt.Sprintf("%s:%d", DefaultAgentHost, port)
}
func (ae *Exporter) doStartLocked() error {
if ae.started {
return nil
}
// Now start it
cc, err := ae.dialToAgent()
if err != nil {
return err
}
ae.grpcClientConn = cc
// Initiate the trace service by sending over node identifier info.
traceSvcClient := agenttracepb.NewTraceServiceClient(cc)
traceExporter, err := traceSvcClient.Export(context.Background())
if err != nil {
return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
}
firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{Node: ae.nodeInfo}
err = nTriesWithExponentialBackoff(maxInitialTracesRetries, 200*time.Microsecond, func() error {
return traceExporter.Send(firstTraceMessage)
})
if err != nil {
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
}
ae.traceExporter = traceExporter
// Initiate the config service by sending over node identifier info.
configStream, err := traceSvcClient.Config(context.Background())
if err != nil {
return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
}
firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: ae.nodeInfo}
err = nTriesWithExponentialBackoff(maxInitialConfigRetries, 200*time.Microsecond, func() error {
return configStream.Send(firstCfgMessage)
})
if err != nil {
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
}
// In the background, handle trace configurations that are beamed down
// by the agent, but also reply to it with the applied configuration.
go ae.handleConfigStreaming(configStream)
return nil
}
// dialToAgent performs a best-effort attempt to dial the agent.
// It retries failed dials with:
// * gRPC dialTimeout of 1s
// * exponential backoff, 5 times with a period of 50ms
// hence in the worst case (no agent actually available), it
// will take at least:
// (5 * 1s) + ((1<<5)-1) * 0.05s = 5s + 1.55s = 6.55s
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
addr := ae.prepareAgentAddress()
dialOpts := []grpc.DialOption{grpc.WithBlock()}
if ae.canDialInsecure {
dialOpts = append(dialOpts, grpc.WithInsecure())
}
var cc *grpc.ClientConn
dialOpts = append(dialOpts, grpc.WithTimeout(1*time.Second))
dialBackoffWaitPeriod := 50 * time.Millisecond
err := nTriesWithExponentialBackoff(5, dialBackoffWaitPeriod, func() error {
var err error
cc, err = grpc.Dial(addr, dialOpts...)
return err
})
return cc, err
}
func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
for {
recv, err := configStream.Recv()
if err != nil {
// TODO: Check if this is a transient error or exponential backoff-able.
return err
}
cfg := recv.Config
if cfg == nil {
continue
}
// Otherwise now apply the trace configuration sent down from the agent
if psamp := cfg.GetProbabilitySampler(); psamp != nil {
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
} else if csamp := cfg.GetConstantSampler(); csamp != nil {
alwaysSample := csamp.Decision
if alwaysSample {
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
} else {
trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
}
} else { // TODO: Add the rate limiting sampler here
}
// Then finally send back to upstream the newly applied configuration
err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
if err != nil {
return err
}
}
}
var (
errNotStarted = errors.New("not started")
)
// Stop shuts down all the connections and resources
// related to the exporter.
func (ae *Exporter) Stop() error {
ae.mu.Lock()
defer ae.mu.Unlock()
if !ae.started {
return errNotStarted
}
if ae.stopped {
// TODO: tell the user that we've already stopped, so perhaps a sentinel error?
return nil
}
ae.Flush()
// Now close the underlying gRPC connection.
var err error
if ae.grpcClientConn != nil {
err = ae.grpcClientConn.Close()
}
// At this point we can change the state variables: started and stopped
ae.started = false
ae.stopped = true
return err
}
func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
if sd == nil {
return
}
_ = ae.traceBundler.Add(sd, -1)
}
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
if len(sdl) == 0 {
return
}
protoSpans := make([]*tracepb.Span, 0, len(sdl))
for _, sd := range sdl {
if sd != nil {
protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
}
}
if len(protoSpans) > 0 {
_ = ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
Spans: protoSpans,
})
}
}
func (ae *Exporter) Flush() {
ae.traceBundler.Flush()
}
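A minimal usage sketch (hypothetical main package, assuming only the APIs defined in this file plus the OpenCensus trace package): register the exporter so sampled spans flow through ExportSpan, then Flush/Stop on shutdown.

package main

import (
	"context"
	"log"

	"contrib.go.opencensus.io/exporter/ocagent"
	"go.opencensus.io/trace"
)

func main() {
	// NewExporter both constructs and starts the exporter.
	exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("demo"))
	if err != nil {
		log.Fatalf("ocagent exporter: %v", err)
	}
	defer exp.Stop()

	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) // sample everything for the demo
	trace.RegisterExporter(exp)                                           // sampled spans now reach ExportSpan

	_, span := trace.StartSpan(context.Background(), "demo/operation")
	span.End()

	exp.Flush() // drain the bundler before exit
}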

View file

@ -0,0 +1,80 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
const (
DefaultAgentPort uint16 = 55678
DefaultAgentHost string = "localhost"
)
type ExporterOption interface {
withExporter(e *Exporter)
}
type portSetter uint16
func (ps portSetter) withExporter(e *Exporter) {
e.agentPort = uint16(ps)
}
var _ ExporterOption = (*portSetter)(nil)
type insecureGrpcConnection int
var _ ExporterOption = (*insecureGrpcConnection)(nil)
func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
e.canDialInsecure = true
}
// WithInsecure disables client transport security for the exporter's gRPC connection
// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
// WithPort allows one to override the port that the exporter will
// connect to the agent on, instead of using DefaultAgentPort.
func WithPort(port uint16) ExporterOption {
return portSetter(port)
}
type addressSetter string
func (as addressSetter) withExporter(e *Exporter) {
e.agentAddress = string(as)
}
var _ ExporterOption = (*addressSetter)(nil)
// WithAddress allows one to set the address that the exporter will
// connect to the agent on. If unset, it will instead try to connect
// to DefaultAgentHost:DefaultAgentPort.
func WithAddress(addr string) ExporterOption {
return addressSetter(addr)
}
type serviceNameSetter string
func (sns serviceNameSetter) withExporter(e *Exporter) {
e.serviceName = string(sns)
}
var _ ExporterOption = (*serviceNameSetter)(nil)
// WithServiceName allows one to set/override the service name
// that the exporter will report to the agent.
func WithServiceName(serviceName string) ExporterOption {
return serviceNameSetter(serviceName)
}
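A hedged sketch of combining these options (the endpoint and service name are hypothetical): WithAddress takes precedence over the host/port defaults; without it the exporter dials DefaultAgentHost on either WithPort's value or DefaultAgentPort.

func newBillingExporter() (*ocagent.Exporter, error) {
	return ocagent.NewExporter(
		ocagent.WithAddress("collector.internal:55678"), // a full address overrides host/port
		ocagent.WithInsecure(),
		ocagent.WithServiceName("billing"),
	)
}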

View file

@ -0,0 +1,162 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"time"
"go.opencensus.io/trace"
"go.opencensus.io/trace/tracestate"
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
"github.com/golang/protobuf/ptypes/timestamp"
)
func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
if sd == nil {
return nil
}
var namePtr *tracepb.TruncatableString
if sd.Name != "" {
namePtr = &tracepb.TruncatableString{Value: sd.Name}
}
return &tracepb.Span{
TraceId: sd.TraceID[:],
SpanId: sd.SpanID[:],
ParentSpanId: sd.ParentSpanID[:],
Status: ocStatusToProtoStatus(sd.Status),
StartTime: timeToTimestamp(sd.StartTime),
EndTime: timeToTimestamp(sd.EndTime),
Links: ocLinksToProtoLinks(sd.Links),
Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
Name: namePtr,
Attributes: ocAttributesToProtoAttributes(sd.Attributes),
Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
}
}
var blankStatus trace.Status
func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
if status == blankStatus {
return nil
}
return &tracepb.Status{
Code: status.Code,
Message: status.Message,
}
}
func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
if len(links) == 0 {
return nil
}
sl := make([]*tracepb.Span_Link, 0, len(links))
for _, ocLink := range links {
// This redefinition is necessary to prevent ocLink.*ID[:] copies
// being reused -- in short we need a new ocLink per iteration.
ocLink := ocLink
sl = append(sl, &tracepb.Span_Link{
TraceId: ocLink.TraceID[:],
SpanId: ocLink.SpanID[:],
Type: ocLinkTypeToProtoLinkType(ocLink.Type),
})
}
return &tracepb.Span_Links{
Link: sl,
}
}
func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
switch oct {
case trace.LinkTypeChild:
return tracepb.Span_Link_CHILD_LINKED_SPAN
case trace.LinkTypeParent:
return tracepb.Span_Link_PARENT_LINKED_SPAN
default:
return tracepb.Span_Link_TYPE_UNSPECIFIED
}
}
func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
if len(attrs) == 0 {
return nil
}
outMap := make(map[string]*tracepb.AttributeValue)
for k, v := range attrs {
switch v := v.(type) {
case bool:
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
case int:
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
case int64:
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
case string:
outMap[k] = &tracepb.AttributeValue{
Value: &tracepb.AttributeValue_StringValue{
StringValue: &tracepb.TruncatableString{Value: v},
},
}
}
}
return &tracepb.Span_Attributes{
AttributeMap: outMap,
}
}
func timeToTimestamp(t time.Time) *timestamp.Timestamp {
nanoTime := t.UnixNano()
return &timestamp.Timestamp{
Seconds: nanoTime / 1e9,
Nanos: int32(nanoTime % 1e9),
}
}
func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
switch kind {
case trace.SpanKindClient:
return tracepb.Span_CLIENT
case trace.SpanKindServer:
return tracepb.Span_SERVER
default:
return tracepb.Span_SPAN_KIND_UNSPECIFIED
}
}
func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
if ts == nil {
return nil
}
return &tracepb.Span_Tracestate{
Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
}
}
func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
for _, entry := range entries {
protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
Key: entry.Key,
Value: entry.Value,
})
}
return protoEntries
}
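A same-package sketch (function name hypothetical) of what the conversion produces for a minimal SpanData:

func exampleConversion() *tracepb.Span {
	now := time.Now()
	sd := &trace.SpanData{
		SpanContext: trace.SpanContext{
			TraceID: trace.TraceID{0x01}, // remaining bytes stay zero
			SpanID:  trace.SpanID{0x02},
		},
		Name:       "demo/op",
		SpanKind:   trace.SpanKindClient,
		StartTime:  now.Add(-time.Second),
		EndTime:    now,
		Attributes: map[string]interface{}{"retries": 3},
	}
	// The kind maps to tracepb.Span_CLIENT and the int attribute becomes an IntValue.
	return ocSpanToProtoSpan(sd)
}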

View file

@ -0,0 +1,17 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
const Version = "0.0.1"

View file

@ -9,6 +9,7 @@ _obj
_test
.DS_Store
.idea/
.vscode/
# Architecture specific extensions/prefixes
*.[568vq]

View file

@ -6,8 +6,6 @@ go:
- master
- 1.11.x
- 1.10.x
- 1.9.x
- 1.8.x
matrix:
allow_failures:
@ -20,7 +18,7 @@ before_install:
- curl -L -o $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v$DEP_VERSION/dep-linux-amd64 && chmod +x $GOPATH/bin/dep
install:
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/lint/golint
- go get -u github.com/stretchr/testify
- go get -u github.com/GoASTScanner/gas
- dep ensure
@ -32,4 +30,4 @@ script:
- go vet ./autorest/...
- test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)"
- go build -v ./autorest/...
- go test -v ./autorest/...
- go test -race -v ./autorest/...

View file

@ -1,5 +1,100 @@
# CHANGELOG
## v11.2.8
### Bug Fixes
- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
## v11.2.7
### Bug Fixes
- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`.
Note that for backward compatibility reasons, both will work until the next major version release of the package.
## v11.2.6
### Bug Fixes
- If zero bytes are read from a polling response body don't attempt to unmarshal them.
## v11.2.5
### Bug Fixes
- Removed race condition in `autorest.DoRetryForStatusCodes`.
## v11.2.4
### Bug Fixes
- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available.
## v11.2.1
NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
longer work with golint.
### Bug Fixes
- Method `MSIConfig.Authorizer` now supports user-assigned identities.
- The adal package now reports its own user-agent string.
## v11.2.0
### New Features
- Added `tracing` package that enables instrumentation of HTTP and API calls.
Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
will start instrumenting the code for metrics and traces.
Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
App Insights Local Forwarder that needs to be running. Note that if the
AI Local Forwarder is not running, tracing will still be enabled.
By default, instrumentation is disabled. Once enabled, instrumentation can also
be programmatically disabled by calling `Disable` (see the sketch after this list).
- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
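A hedged sketch of the programmatic path described in the tracing bullet above (the helper is illustrative; only `Enable`, `EnableWithAIForwarding`, and `Disable` come from the `tracing` package):

func configureTracing(forwarderEndpoint string) error {
	// forwarderEndpoint would normally come from OCAGENT_TRACE_EXPORTER_ENDPOINT.
	if forwarderEndpoint == "" {
		return tracing.Enable()
	}
	return tracing.EnableWithAIForwarding(forwarderEndpoint)
}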
### Bug Fixes
- Don't use the initial request's context for LRO polling.
- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
it is already set.
## v11.1.1
### Bug Fixes
- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
## v11.1.0
### New Features
- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI.
- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
## v11.0.1
### New Features
- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
## v11.0.0
### Breaking Changes
- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number`
- ExpiresIn
- ExpiresOn
- NotBefore
### New Features
- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration.
## v10.15.5
### Bug Fixes
@ -28,21 +123,21 @@
### Bug Fixes
- If an LRO API returns a ```Failed``` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
- For failed LROs without an OData v4 error include the response body in the error's ```AdditionalInfo``` field to aid in diagnosing the failure.
- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
## v10.15.0
### New Features
- Add initial support for request/response logging via setting environment variables.
Setting ```AZURE_GO_SDK_LOG_LEVEL``` to ```LogInfo``` will log request/response
without their bodies. To include the bodies set the log level to ```LogDebug```.
Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
without their bodies. To include the bodies set the log level to `LogDebug`.
By default the logger writes to strerr, however it can also write to stdout or a file
if specified in ```AZURE_GO_SDK_LOG_FILE```. Note that if the specified file
if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
already exists it will be truncated.
IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
headers. Any other secrets will *not* be redacted.
headers. Any other secrets will _not_ be redacted.
## v10.14.0
@ -124,10 +219,10 @@
### Deprecated Methods
| Old Method | New Method |
|-------------:|:-----------:|
|azure.NewFuture() | azure.NewFutureFromResponse()|
|Future.WaitForCompletion() | Future.WaitForCompletionRef()|
| Old Method | New Method |
| -------------------------: | :---------------------------: |
| azure.NewFuture() | azure.NewFutureFromResponse() |
| Future.WaitForCompletion() | Future.WaitForCompletionRef() |
### New Features
@ -159,7 +254,7 @@
### New Features
- Added *WithContext() methods to ADAL token refresh operations.
- Added \*WithContext() methods to ADAL token refresh operations.
## v10.6.2
@ -192,12 +287,14 @@
## v10.4.0
### New Features
- Added helper for parsing Azure Resource ID's.
- Added deprecation message to utils.GetEnvVarOrExit()
## v10.3.0
### New Features
- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints
- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint
@ -255,6 +352,7 @@
- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
## v9.10.0
- Fix the Service Bus suffix in Azure public env
- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control)
@ -308,6 +406,7 @@
## v9.5.3
### Bug Fixes
- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters.
- Set correct Content Type when using autorest.WithFormData.
@ -315,7 +414,7 @@
### Bug Fixes
- Check for nil *http.Response before dereferencing it.
- Check for nil \*http.Response before dereferencing it.
## v9.5.1
@ -399,8 +498,8 @@
### Bug Fixes
- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
- Adding missing Apache Headers
- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
- Adding missing Apache Headers
## v9.0.0
@ -429,9 +528,11 @@ Updates to Error string formats for clarity. Also, adding a copy of the http.Res
- Make RetriableRequest work with multiple versions of Go
## v8.1.1
Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
## v8.1.0
Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
## v8.0.0
@ -440,9 +541,11 @@ ADAL refactored into its own package.
Support for UNIX time.
## v7.3.1
- Version Testing now removed from production bits that are shipped with the library.
## v7.3.0
- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
to acknowledge that they do not need either the entire response body or a
trailing portion of it. In doing so, Go's http library can reuse HTTP
@ -452,40 +555,49 @@ Support for UNIX time.
- Updating Glide dependencies.
## v7.2.5
- Fixed the Active Directory endpoint for the China cloud.
- Removes UTF-8 BOM if present in response payload.
- Added telemetry.
## v7.2.3
- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
duration.
## v7.2.2
- autorest/azure: added ASM and ARM VM DNS suffixes.
## v7.2.1
- fixed parsing of UTC times that are not RFC3339 conformant.
## v7.2.0
- autorest/validation: Reformat validation error for better error message.
## v7.1.0
- preparer: Added support for multipart formdata - WithMultiPartFormdata()
- preparer: Added support for sending file in request body - WithFile
- client: Added RetryDuration parameter.
- autorest/validation: new package for validation code for Azure Go SDK.
## v7.0.7
- Add trailing / to endpoint
- azure: add EnvironmentFromName
## v7.0.6
- Add retry logic for 408, 500, 502, 503 and 504 status codes.
- Change url path and query encoding logic.
- Fix DelayForBackoff for proper exponential delay.
- Add CookieJar in Client.
## v7.0.5
- Add check to start polling only when status is in [200,201,202].
- Refactoring for unchecked errors.
- azure/persist changes.
@ -494,20 +606,25 @@ Support for UNIX time.
- Add attribute details in service error.
## v7.0.4
- Better error messages for long running operation failures
## v7.0.3
- Corrected DoPollForAsynchronous to properly handle the initial response
## v7.0.2
- Corrected DoPollForAsynchronous to continue using the polling method first discovered
## v7.0.1
- Fixed empty JSON input error in ByUnmarshallingJSON
- Fixed polling support for GET calls
- Changed format name from TimeRfc1123 to TimeRFC1123
## v7.0.0
- Added ByCopying responder with supporting TeeReadCloser
- Rewrote Azure asynchronous handling
- Reverted to only unmarshalling JSON
@ -524,9 +641,11 @@ only checked for one of those (that is, the presence of the `Azure-AsyncOperatio
The new code correctly covers all cases and aligns with the other Azure SDKs.
## v6.1.0
- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
## v6.0.0
- Completely reworked the handling of polled and asynchronous requests
- Removed unnecessary routines
- Reworked `mocks.Sender` to replay a series of `http.Response` objects
@ -537,21 +656,25 @@ Handling polled and asynchronous requests is no longer part of `Client#Send`. In
and `azure.DoPollForAsynchronous` for examples.
## v5.0.0
- Added new RespondDecorators unmarshalling primitive types
- Corrected application of inspection and authorization PrependDecorators
## v4.0.0
- Added support for Azure long-running operations.
- Added cancelation support to all decorators and functions that may delay.
- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
## v3.1.0
- Add support for OAuth Device Flow authorization.
- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
- Add helpers for persisting and restoring Tokens.
- Increased code coverage in the github.com/Azure/autorest/azure package
## v3.0.0
- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
- Breaking: `Client#Send()` no longer takes `codes ...int` argument.

View file

@ -1,6 +1,26 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:e1b859e3d9e90007d5fbf25edf57733b224f1857f6592636130afab3af8cfae7"
name = "contrib.go.opencensus.io/exporter/ocagent"
packages = ["."]
pruneopts = ""
revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d"
version = "v0.2.0"
[[projects]]
digest = "1:e0a4505d5cf7ac6b5d92e3aee79d838b5f1ae8e9641ec7fa5d1e9b01d7a7ea95"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/agent/trace/v1",
"gen-go/trace/v1",
]
pruneopts = ""
revision = "24333298e36590ea0716598caacc8959fc393c48"
version = "v0.0.2"
[[projects]]
digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77"
name = "github.com/davecgh/go-spew"
@ -18,20 +38,35 @@
version = "v3.2.0"
[[projects]]
branch = "master"
digest = "1:7f175a633086a933d1940a7e7dc2154a0070a7c25fb4a2f671f3eef1a34d1fd7"
name = "github.com/dimchansky/utfbom"
packages = ["."]
pruneopts = ""
revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c"
version = "v1.0.0"
[[projects]]
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = ""
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = ""
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
version = "v1.0.0"
[[projects]]
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
@ -52,25 +87,160 @@
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
digest = "1:ad67dfd3799a2c58f6c65871dd141d8b53f61f600aec48ce8d7fa16a4d5476f8"
name = "go.opencensus.io"
packages = [
".",
"exemplar",
"internal",
"internal/tagencoding",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"plugin/ochttp/propagation/tracecontext",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
]
pruneopts = ""
revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b"
version = "v0.18.0"
[[projects]]
branch = "master"
digest = "1:793a79198b755828dec284c6f1325e24e09186f1b7ba818b65c7c35104ed86eb"
digest = "1:78f41d38365ccef743e54ed854a2faf73313ba0750c621116a8eeb0395590bd0"
name = "golang.org/x/crypto"
packages = [
"pkcs12",
"pkcs12/internal/rc2",
]
pruneopts = ""
revision = "614d502a4dac94afa3a6ce146bd1736da82514c6"
revision = "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb"
[[projects]]
branch = "master"
digest = "1:547dcb6aebfb7fb17947660ebb034470c13f4d63d893def190a2f7ba3d09bc38"
name = "golang.org/x/net"
packages = [
"context",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = ""
revision = "49bb7cea24b1df9410e1712aa6433dae904ff66a"
[[projects]]
branch = "master"
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = ""
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
branch = "master"
digest = "1:2ed0bf267e44950120acd95570227e28184573ffb099bd85b529ee148e004ddb"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = ""
revision = "fa43e7bc11baaae89f3f902b2b4d832b68234844"
[[projects]]
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = ""
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:ca2e72555afbcebdead7c961b135650b5111dbbaf37a874de63976fdda57f129"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = ""
revision = "c51f30376ab7ec4f22b65de846a41593c8b70f07"
[[projects]]
branch = "master"
digest = "1:1b3b4ec811695907c4a3cb92e4f32834a4a42459bff7e02068b6b2b5344803cd"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = ""
revision = "af9cb2a35e7f169ec875002c1829c9b315cddc04"
[[projects]]
digest = "1:15656947b87a6a240e61dcfae9e71a55a8d5677f240d12ab48f02cdbabf1e309"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
]
pruneopts = ""
revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1"
version = "v1.15.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"contrib.go.opencensus.io/exporter/ocagent",
"github.com/dgrijalva/jwt-go",
"github.com/dimchansky/utfbom",
"github.com/mitchellh/go-homedir",
"github.com/stretchr/testify/require",
"go.opencensus.io/plugin/ochttp",
"go.opencensus.io/plugin/ochttp/propagation/tracecontext",
"go.opencensus.io/stats/view",
"go.opencensus.io/trace",
"golang.org/x/crypto/pkcs12",
]
solver-name = "gps-cdcl"

View file

@ -25,17 +25,21 @@
version = "3.1.0"
[[constraint]]
branch = "master"
name = "github.com/dimchansky/utfbom"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/mitchellh/go-homedir"
version = "1.0.0"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.0"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
name = "go.opencensus.io"
version = "0.18.0"
[[constraint]]
name = "contrib.go.opencensus.io/exporter/ocagent"
version = "0.2.0"

View file

@ -19,10 +19,6 @@ import (
"net/url"
)
const (
activeDirectoryAPIVersion = "1.0"
)
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
@ -46,11 +42,25 @@ func validateStringParam(param, name string) error {
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
apiVer := "1.0"
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
}
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
return nil, err
}
api := ""
// it's legal for tenantID to be empty so don't validate it
const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
if apiVersion != nil {
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
return nil, err
}
api = fmt.Sprintf("?api-version=%s", *apiVersion)
}
const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err
if err != nil {
return nil, err
}
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
if err != nil {
return nil, err
}
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
if err != nil {
return nil, err
}
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
if err != nil {
return nil, err
}
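A hedged usage sketch (tenant ID is hypothetical): passing a version appends the api-version query parameter, while passing nil omits it entirely.

func newConfig() (*adal.OAuthConfig, error) {
	apiVersion := "1.0"
	cfg, err := adal.NewOAuthConfigWithAPIVersion(
		"https://login.microsoftonline.com",
		"my-tenant-id", // hypothetical tenant
		&apiVersion,    // nil would drop ?api-version=... from the URLs
	)
	if err != nil {
		return nil, err
	}
	// cfg.TokenEndpoint is now .../my-tenant-id/oauth2/token?api-version=1.0
	return cfg, nil
}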

View file

@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r)
}
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender

View file

@ -29,13 +29,12 @@ import (
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/version"
"github.com/Azure/go-autorest/tracing"
"github.com/dgrijalva/jwt-go"
)
@ -97,18 +96,27 @@ type RefresherWithContext interface {
type TokenRefreshCallback func(Token) error
// Token encapsulates the access token used to authorize Azure requests.
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn string `json:"expires_in"`
ExpiresOn string `json:"expires_on"`
NotBefore string `json:"not_before"`
ExpiresIn json.Number `json:"expires_in"`
ExpiresOn json.Number `json:"expires_on"`
NotBefore json.Number `json:"not_before"`
Resource string `json:"resource"`
Type string `json:"token_type"`
}
func newToken() Token {
return Token{
ExpiresIn: "0",
ExpiresOn: "0",
NotBefore: "0",
}
}
// IsZero returns true if the token object is zero-initialized.
func (t Token) IsZero() bool {
return t == Token{}
@ -116,12 +124,12 @@ func (t Token) IsZero() bool {
// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
s, err := strconv.Atoi(t.ExpiresOn)
s, err := t.ExpiresOn.Float64()
if err != nil {
s = -3600
}
expiration := date.NewUnixTimeFromSeconds(float64(s))
expiration := date.NewUnixTimeFromSeconds(s)
return time.Time(expiration).UTC()
}
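The json.Number fields are what let one struct accept both shapes the services emit: AAD returns these timestamps as JSON numbers while ADFS returns them as strings, and encoding/json will decode a numeric string into a json.Number field. A hedged sketch (assumes encoding/json and the adal package are imported; errors ignored for brevity):

func expiriesMatch() bool {
	var aad, adfs adal.Token
	_ = json.Unmarshal([]byte(`{"expires_on": 1544000000}`), &aad)    // AAD: number
	_ = json.Unmarshal([]byte(`{"expires_on": "1544000000"}`), &adfs) // ADFS: string
	return aad.Expires().Equal(adfs.Expires()) // true
}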
@ -218,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo
token := jwt.New(jwt.SigningMethodRS256)
token.Header["x5t"] = thumbprint
x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)}
token.Header["x5c"] = x5c
token.Claims = jwt.MapClaims{
"aud": spt.inner.OauthConfig.TokenEndpoint.String(),
"iss": spt.inner.ClientID,
@ -375,8 +385,13 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
if err != nil {
return err
}
spt.refreshLock = &sync.RWMutex{}
spt.sender = &http.Client{}
// Don't override the refreshLock or the sender if those have been already set.
if spt.refreshLock == nil {
spt.refreshLock = &sync.RWMutex{}
}
if spt.sender == nil {
spt.sender = &http.Client{Transport: tracing.Transport}
}
return nil
}
@ -414,6 +429,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
}
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
Token: newToken(),
OauthConfig: oauthConfig,
Secret: secret,
ClientID: id,
@ -422,7 +438,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
sender: &http.Client{},
sender: &http.Client{Transport: tracing.Transport},
refreshCallbacks: callbacks,
}
return spt, nil
@ -653,6 +669,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
Token: newToken(),
OauthConfig: OAuthConfig{
TokenEndpoint: *msiEndpointURL,
},
@ -662,7 +679,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
sender: &http.Client{},
sender: &http.Client{Transport: tracing.Transport},
refreshCallbacks: callbacks,
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
}
@ -779,7 +796,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
req.Header.Add("User-Agent", version.UserAgent())
req.Header.Add("User-Agent", userAgent())
req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
v := url.Values{}

View file

@ -1,4 +1,9 @@
package version
package adal
import (
"fmt"
"runtime"
)
// Copyright 2017 Microsoft Corporation
//
@ -14,24 +19,17 @@ package version
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"runtime"
)
// Number contains the semantic version of this SDK.
const Number = "v10.15.5"
const number = "v1.0.0"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
Number,
number,
)
)
// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version.
func UserAgent() string {
return userAgent
func userAgent() string {
return ua
}

View file

@ -21,6 +21,7 @@ import (
"strings"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/tracing"
)
const (
@ -68,7 +69,7 @@ func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[str
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters
// WithAuthorization returns a PrepareDecorator that adds HTTP headers and Query Parameters.
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer {
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
@ -147,7 +148,7 @@ type BearerAuthorizerCallback struct {
// is invoked when the HTTP request is submitted.
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
if sender == nil {
sender = &http.Client{}
sender = &http.Client{Transport: tracing.Transport}
}
return &BearerAuthorizerCallback{sender: sender, callback: callback}
}

View file

@ -26,6 +26,7 @@ import (
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/tracing"
)
const (
@ -58,10 +59,7 @@ func NewFuture(req *http.Request) Future {
// with the initial response from an asynchronous operation.
func NewFutureFromResponse(resp *http.Response) (Future, error) {
pt, err := createPollingTracker(resp)
if err != nil {
return Future{}, err
}
return Future{pt: pt}, nil
return Future{pt: pt}, err
}
// Response returns the last HTTP response.
@ -89,7 +87,23 @@ func (f Future) PollingMethod() PollingMethodType {
}
// Done queries the service to see if the operation has completed.
// Deprecated: Use DoneWithContext()
func (f *Future) Done(sender autorest.Sender) (bool, error) {
return f.DoneWithContext(context.Background(), sender)
}
// DoneWithContext queries the service to see if the operation has completed.
func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
defer func() {
sc := -1
resp := f.Response()
if resp != nil {
sc = resp.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
// support for legacy Future implementation
if f.req != nil {
resp, err := sender.Do(f.req)
@ -110,7 +124,7 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) {
if f.pt.hasTerminated() {
return true, f.pt.pollingError()
}
if err := f.pt.pollForStatus(sender); err != nil {
if err := f.pt.pollForStatus(ctx, sender); err != nil {
return false, err
}
if err := f.pt.checkForErrors(); err != nil {
@ -167,11 +181,25 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error {
ctx, cancel := context.WithTimeout(ctx, client.PollingDuration)
defer cancel()
done, err := f.Done(client)
for attempts := 0; !done; done, err = f.Done(client) {
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
defer func() {
sc := -1
resp := f.Response()
if resp != nil {
sc = resp.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
cancelCtx := ctx
if d := client.PollingDuration; d != 0 {
var cancel context.CancelFunc
cancelCtx, cancel = context.WithTimeout(ctx, d)
defer cancel()
}
done, err := f.DoneWithContext(ctx, client)
for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
if attempts >= client.RetryAttempts {
return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
}
@ -195,12 +223,12 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
attempts++
}
// wait until the delay elapses or the context is cancelled
delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done())
delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
if !delayElapsed {
return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
}
}
return err
return
}
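A hedged caller-side sketch (helper name hypothetical; assumes the autorest and autorest/azure packages): with client.PollingDuration set to zero, the supplied context alone bounds how long polling runs.

func awaitLRO(ctx context.Context, client autorest.Client, f *azure.Future) error {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()
	return f.WaitForCompletionRef(ctx, client)
}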
// MarshalJSON implements the json.Marshaler interface.
@ -285,7 +313,7 @@ type pollingTracker interface {
initializeState() error
// makes an HTTP request to check the status of the LRO
pollForStatus(sender autorest.Sender) error
pollForStatus(ctx context.Context, sender autorest.Sender) error
// updates internal tracker state, call this after each call to pollForStatus
updatePollingState(provStateApl bool) error
@ -399,6 +427,10 @@ func (pt *pollingTrackerBase) updateRawBody() error {
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
}
// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
if len(b) == 0 {
return nil
}
// put the body back so it's available to other callers
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
if err = json.Unmarshal(b, &pt.rawBody); err != nil {
@ -408,15 +440,13 @@ func (pt *pollingTrackerBase) updateRawBody() error {
return nil
}
func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error {
func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
}
// attach the context from the original request if available (it will be absent for deserialized futures)
if pt.resp != nil {
req = req.WithContext(pt.resp.Request.Context())
}
req = req.WithContext(ctx)
pt.resp, err = sender.Do(req)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
@ -445,7 +475,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
re := respErr{}
defer pt.resp.Body.Close()
var b []byte
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
goto Default
}
if err = json.Unmarshal(b, &re); err != nil {
@ -663,7 +693,7 @@ func (pt *pollingTrackerPatch) updatePollingMethod() error {
}
}
// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
// note the absense of the "final GET" mechanism for PATCH
// note the absence of the "final GET" mechanism for PATCH
if pt.resp.StatusCode == http.StatusAccepted {
ao, err := getURLFromAsyncOpHeader(pt.resp)
if err != nil {

View file

@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
}
// poll for registered provisioning state
now := time.Now()
for err == nil && time.Since(now) < client.PollingDuration {
registrationStartTime := time.Now()
for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
// taken from the resources SDK
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
preparer := autorest.CreatePreparer(
@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
return originalReq.Context().Err()
}
}
if !(time.Since(now) < client.PollingDuration) {
if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
return errors.New("polling for resource provider registration has exceeded the polling duration")
}
return err

View file

@ -26,7 +26,7 @@ import (
"time"
"github.com/Azure/go-autorest/logger"
"github.com/Azure/go-autorest/version"
"github.com/Azure/go-autorest/tracing"
)
const (
@ -147,6 +147,7 @@ type Client struct {
PollingDelay time.Duration
// PollingDuration sets the maximum polling time after which an error is returned.
// Setting this to zero will use the provided context to control the duration.
PollingDuration time.Duration
// RetryAttempts sets the default number of retry attempts for client.
@ -173,7 +174,7 @@ func NewClientWithUserAgent(ua string) Client {
PollingDuration: DefaultPollingDuration,
RetryAttempts: DefaultRetryAttempts,
RetryDuration: DefaultRetryDuration,
UserAgent: version.UserAgent(),
UserAgent: UserAgent(),
}
c.Sender = c.sender()
c.AddToUserAgent(ua)
@ -229,8 +230,10 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
func (c Client) sender() Sender {
if c.Sender == nil {
j, _ := cookiejar.New(nil)
return &http.Client{Jar: j}
client := &http.Client{Jar: j, Transport: tracing.Transport}
return client
}
return c.Sender
}

View file

@ -21,6 +21,8 @@ import (
"net/http"
"strconv"
"time"
"github.com/Azure/go-autorest/tracing"
)
// Sender is the interface that wraps the Do method to send HTTP requests.
@ -38,7 +40,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r)
}
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
@ -68,7 +70,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
//
// Send will not poll or retry requests.
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
return SendWithSender(&http.Client{}, r, decorators...)
return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
}
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
@ -216,8 +218,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
rr := NewRetriableRequest(r)
// Increment to add the first call (attempts denotes number of retries)
attempts++
for attempt := 0; attempt < attempts; {
for attempt := 0; attempt < attempts+1; {
err = rr.Prepare()
if err != nil {
return resp, err

View file

@ -157,7 +157,7 @@ func AsStringSlice(s interface{}) ([]string, error) {
}
// String method converts interface v to string. If interface is a list, it
// joins list elements using the seperator. Note that only sep[0] will be used for
// joins list elements using the separator. Note that only sep[0] will be used for
// joining if any separator is specified.
func String(v interface{}, sep ...string) string {
if len(sep) == 0 {

View file

@ -1,7 +1,5 @@
package autorest
import "github.com/Azure/go-autorest/version"
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
@ -16,7 +14,28 @@ import "github.com/Azure/go-autorest/version"
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"runtime"
)
const number = "v11.2.8"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
number,
)
)
// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
func UserAgent() string {
return userAgent
}
// Version returns the semantic version (see http://semver.org).
func Version() string {
return version.Number
return number
}

View file

@ -162,7 +162,7 @@ type Writer interface {
// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
// Custom filters can be specified to exclude URL, header, and/or body content from the log.
// By default no respone content is excluded.
// By default no response content is excluded.
WriteResponse(resp *http.Response, filter Filter)
}
@ -318,7 +318,7 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
// returns true if the provided body should be included in the log
func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
ct := header.Get("Content-Type")
return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1
return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream")
}
// creates standard header for log entries, it contains a timestamp and the log level

vendor/github.com/Azure/go-autorest/tracing/tracing.go
View file

@ -0,0 +1,190 @@
package tracing
// Copyright 2018 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"context"
"fmt"
"net/http"
"os"
"contrib.go.opencensus.io/exporter/ocagent"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
)
var (
// Transport is the default tracing RoundTripper. The custom options setter
// controls whether traces are emitted.
Transport = &ochttp.Transport{
Propagation: &tracecontext.HTTPFormat{},
GetStartOptions: getStartOptions,
}
// enabled indicates whether tracing is enabled.
enabled = false
// sampler is the tracing sampler. If tracing is disabled it never samples.
// Otherwise it uses the parent sampler or the default.
sampler = trace.NeverSample()
// Views for metric instrumentation.
views = map[string]*view.View{}
// the trace exporter
traceExporter trace.Exporter
)
func init() {
enableFromEnv()
}
func enableFromEnv() {
_, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
// legacy: also accept the misspelled variable name so existing configurations keep working
_, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
if ok || legacyOk {
agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")
if ok {
EnableWithAIForwarding(agentEndpoint)
} else {
Enable()
}
}
}
// IsEnabled returns true if monitoring is enabled for the SDK.
func IsEnabled() bool {
return enabled
}
// Enable will start instrumentation for metrics and traces.
func Enable() error {
enabled = true
sampler = nil
err := initStats()
return err
}
// Disable will disable instrumentation for metrics and traces.
func Disable() {
disableStats()
sampler = trace.NeverSample()
if traceExporter != nil {
trace.UnregisterExporter(traceExporter)
}
enabled = false
}
// EnableWithAIForwarding will start instrumentation and connect to the App Insights
// forwarder exporter, making the metrics and traces available in App Insights.
func EnableWithAIForwarding(agentEndpoint string) (err error) {
err = Enable()
if err != nil {
return err
}
// assign to the package-level exporter (plain `=`, not `:=`, which would shadow
// the variable and prevent Disable from unregistering it later)
traceExporter, err = ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
if err != nil {
return err
}
trace.RegisterExporter(traceExporter)
return
}
// getStartOptions is the custom options setter for the ochttp package.
func getStartOptions(*http.Request) trace.StartOptions {
return trace.StartOptions{
Sampler: sampler,
}
}
// initStats registers the views for the http metrics
func initStats() (err error) {
clientViews := []*view.View{
ochttp.ClientCompletedCount,
ochttp.ClientRoundtripLatencyDistribution,
ochttp.ClientReceivedBytesDistribution,
ochttp.ClientSentBytesDistribution,
}
for _, cv := range clientViews {
vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
views[vn] = cv.WithName(vn)
err = view.Register(views[vn])
if err != nil {
return err
}
}
return
}
// disableStats will unregister the previously registered metrics
func disableStats() {
for _, v := range views {
view.Unregister(v)
}
}
// StartSpan starts a trace span
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
return ctx
}
// EndSpan ends a previously started span stored in the context
func EndSpan(ctx context.Context, httpStatusCode int, err error) {
span := trace.FromContext(ctx)
if span == nil {
return
}
if err != nil {
span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
}
span.End()
}
// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
func toTraceStatusCode(httpStatusCode int) int32 {
switch {
case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
return trace.StatusCodeOK
case httpStatusCode == http.StatusBadRequest:
return trace.StatusCodeInvalidArgument
case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
return trace.StatusCodeUnauthenticated
case httpStatusCode == http.StatusForbidden:
return trace.StatusCodePermissionDenied
case httpStatusCode == http.StatusNotFound:
return trace.StatusCodeNotFound
case httpStatusCode == http.StatusTooManyRequests:
return trace.StatusCodeResourceExhausted
case httpStatusCode == 499: // client closed request (non-standard, no net/http constant)
return trace.StatusCodeCancelled
case httpStatusCode == http.StatusNotImplemented:
return trace.StatusCodeUnimplemented
case httpStatusCode == http.StatusServiceUnavailable:
return trace.StatusCodeUnavailable
case httpStatusCode == http.StatusGatewayTimeout:
return trace.StatusCodeDeadlineExceeded
default:
return trace.StatusCodeUnknown
}
}
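Taken together with the `Send` change above, a client picks up this instrumentation by routing requests through `tracing.Transport` and bracketing calls with `StartSpan`/`EndSpan`. A minimal sketch of how a caller might use the package (illustrative; the URL and span name are not from the diff):

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

func main() {
	// requests sent through this client are traced once tracing is enabled
	client := &http.Client{Transport: tracing.Transport}

	ctx := tracing.StartSpan(context.Background(), "example.Get")
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	resp, err := client.Do(req.WithContext(ctx))
	status := 0
	if resp != nil {
		status = resp.StatusCode
		resp.Body.Close()
	}
	tracing.EndSpan(ctx, status, err)
}
```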

View file

@ -1 +1,2 @@
logrus
vendor

View file

@ -1,3 +1,15 @@
# 1.2.0
This new release introduces:
* A new method `SetReportCaller` in the `Logger` to record the file, line and calling function from which the log entry was issued
* A new trace level named `Trace` whose level is below `Debug`
* A configurable exit function to be called upon a Fatal trace
* The `Level` object now implements the `encoding.TextUnmarshaler` interface
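Taken together, a minimal sketch exercising these 1.2.0 additions (illustrative, not part of the changelog):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetReportCaller(true)    // record file, line and function per entry
	log.SetLevel(log.TraceLevel) // enable the new level below Debug

	var lvl log.Level
	_ = lvl.UnmarshalText([]byte("trace")) // Level now implements encoding.TextUnmarshaler

	log.StandardLogger().ExitFunc = func(int) {} // the exit on Fatal is now configurable
	log.Trace("finer-grained than Debug")
}
```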
# 1.1.1
This is a bug fix release.
* fix the build break on Solaris
* don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
# 1.1.0
This new release introduces:
* several fixes:

View file

@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
To ensure this behaviour even if a TTY is attached, set your formatter as follows:
```go
log.SetFormatter(&log.TextFormatter{
DisableColors: true,
FullTimestamp: true,
})
```
#### Logging Method Name
If you wish to add the calling method as a field, instruct the logger via:
```go
log.SetReportCaller(true)
```
This adds the caller as 'method' like so:
```json
{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
```
```text
time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
```
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
environment via benchmarks:
```
go test -bench=.*CallerTracing
```
#### Case-sensitivity
@ -246,9 +277,10 @@ A list of currently known of service hook can be found in this wiki [page](https
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Trace("Something very low level.")
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")

View file

@ -4,11 +4,30 @@ import (
"bytes"
"fmt"
"os"
"reflect"
"runtime"
"strings"
"sync"
"time"
)
var bufferPool *sync.Pool
var (
bufferPool *sync.Pool
// qualified package name, cached at first use
logrusPackage string
// Positions in the call stack when tracing to report the calling method
minimumCallerDepth int
// Used for caller information initialisation
callerInitOnce sync.Once
)
const (
maximumCallerDepth int = 25
knownLogrusFrames int = 4
)
func init() {
bufferPool = &sync.Pool{
@ -16,15 +35,18 @@ func init() {
return new(bytes.Buffer)
},
}
// start at the bottom of the stack before the package-name cache is primed
minimumCallerDepth = 1
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
// reused and passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
@ -34,22 +56,28 @@ type Entry struct {
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
// Calling method, with package name
Caller *runtime.Frame
// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer
// err may contain a field formatting error
err string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is five fields, give a little extra room
Data: make(Fields, 5),
// Default is three fields, plus one optional. Give a little extra room.
Data: make(Fields, 6),
}
}
@ -80,10 +108,18 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range entry.Data {
data[k] = v
}
var field_err string
for k, v := range fields {
data[k] = v
if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
field_err = fmt.Sprintf("can not add field %q", k)
if entry.err != "" {
field_err = entry.err + ", " + field_err
}
} else {
data[k] = v
}
}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
}
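// Illustrative note: a field whose value is a function is rejected, e.g.
// WithFields(Fields{"cb": func() {}}) stores `can not add field "cb"` in
// entry.err rather than adding the field.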
// Overrides the time of the Entry.
@ -91,6 +127,57 @@ func (entry *Entry) WithTime(t time.Time) *Entry {
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
}
// getPackageName reduces a fully qualified function name to the package name
// There really ought to be a better way...
func getPackageName(f string) string {
for {
lastPeriod := strings.LastIndex(f, ".")
lastSlash := strings.LastIndex(f, "/")
if lastPeriod > lastSlash {
f = f[:lastPeriod]
} else {
break
}
}
return f
}
// getCaller retrieves the name of the first non-logrus calling function
func getCaller() *runtime.Frame {
// Restrict the lookback frames to avoid runaway lookups
pcs := make([]uintptr, maximumCallerDepth)
depth := runtime.Callers(minimumCallerDepth, pcs)
frames := runtime.CallersFrames(pcs[:depth])
// cache this package's fully-qualified name
callerInitOnce.Do(func() {
logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
// now that we have the cache, we can skip a minimum count of known-logrus functions
// XXX this is dubious: the number of frames may vary; consider storing an entry in a logger interface
minimumCallerDepth = knownLogrusFrames
})
for f, again := frames.Next(); again; f, again = frames.Next() {
pkg := getPackageName(f.Function)
// If the caller isn't part of this package, we're done
if pkg != logrusPackage {
return &f
}
}
// if we got here, we failed to find the caller's context
return nil
}
func (entry Entry) HasCaller() (has bool) {
return entry.Logger != nil &&
entry.Logger.ReportCaller &&
entry.Caller != nil
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
@ -107,6 +194,9 @@ func (entry Entry) log(level Level, msg string) {
entry.Level = level
entry.Message = msg
if entry.Logger.ReportCaller {
entry.Caller = getCaller()
}
entry.fireHooks()
@ -150,6 +240,12 @@ func (entry *Entry) write() {
}
}
func (entry *Entry) Trace(args ...interface{}) {
if entry.Logger.IsLevelEnabled(TraceLevel) {
entry.log(TraceLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.log(DebugLevel, fmt.Sprint(args...))
@ -186,7 +282,7 @@ func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
entry.Logger.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
@ -198,6 +294,12 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions
func (entry *Entry) Tracef(format string, args ...interface{}) {
if entry.Logger.IsLevelEnabled(TraceLevel) {
entry.Trace(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(fmt.Sprintf(format, args...))
@ -234,7 +336,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
entry.Logger.Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
@ -245,6 +347,12 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions
func (entry *Entry) Traceln(args ...interface{}) {
if entry.Logger.IsLevelEnabled(TraceLevel) {
entry.Trace(entry.sprintlnn(args...))
}
}
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(entry.sprintlnn(args...))
@ -281,7 +389,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
entry.Logger.Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {

View file

@ -24,6 +24,12 @@ func SetFormatter(formatter Formatter) {
std.SetFormatter(formatter)
}
// SetReportCaller sets whether the standard logger will include the calling
// method as a field.
func SetReportCaller(include bool) {
std.SetReportCaller(include)
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.SetLevel(level)
@ -77,6 +83,11 @@ func WithTime(t time.Time) *Entry {
return std.WithTime(t)
}
// Trace logs a message at level Trace on the standard logger.
func Trace(args ...interface{}) {
std.Trace(args...)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
@ -117,6 +128,11 @@ func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
std.Tracef(format, args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
@ -157,6 +173,11 @@ func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Traceln logs a message at level Trace on the standard logger.
func Traceln(args ...interface{}) {
std.Traceln(args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)

View file

@ -2,7 +2,16 @@ package logrus
import "time"
const defaultTimestampFormat = time.RFC3339
// Default key names for the default fields
const (
defaultTimestampFormat = time.RFC3339
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
FieldKeyLogrusError = "logrus_error"
FieldKeyFunc = "func"
FieldKeyFile = "file"
)
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
@ -18,7 +27,7 @@ type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
@ -30,7 +39,7 @@ type Formatter interface {
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields, fieldMap FieldMap) {
func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
timeKey := fieldMap.resolve(FieldKeyTime)
if t, ok := data[timeKey]; ok {
data["fields."+timeKey] = t
@ -48,4 +57,22 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) {
data["fields."+levelKey] = l
delete(data, levelKey)
}
logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
if l, ok := data[logrusErrKey]; ok {
data["fields."+logrusErrKey] = l
delete(data, logrusErrKey)
}
// If reportCaller is not set, 'func' will not conflict.
if reportCaller {
funcKey := fieldMap.resolve(FieldKeyFunc)
if l, ok := data[funcKey]; ok {
data["fields."+funcKey] = l
}
fileKey := fieldMap.resolve(FieldKeyFile)
if l, ok := data[fileKey]; ok {
data["fields."+fileKey] = l
}
}
}
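// Illustrative note: with the default FieldMap, WithField("level", 1) is
// re-keyed to "fields.level" here so the entry's own level is not overwritten.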

View file

@ -2,8 +2,9 @@ module github.com/sirupsen/logrus
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe
github.com/konsorten/go-windows-terminal-sequences v1.0.1
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.2.2
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33

View file

@ -2,8 +2,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=

View file

@ -11,13 +11,6 @@ type fieldKey string
// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string
// Default key names for the default fields
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
@ -41,9 +34,10 @@ type JSONFormatter struct {
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message",
// FieldKeyMsg: "@message",
// FieldKeyFunc: "@caller",
// },
// }
FieldMap FieldMap
@ -54,7 +48,7 @@ type JSONFormatter struct {
// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
data := make(Fields, len(entry.Data)+4)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
@ -72,18 +66,25 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data = newData
}
prefixFieldClashes(data, f.FieldMap)
prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
if entry.err != "" {
data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
}
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
if entry.HasCaller() {
data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
}
var b *bytes.Buffer
if entry.Buffer != nil {

View file

@ -24,6 +24,10 @@ type Logger struct {
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// Flag for whether to log caller info (off by default)
ReportCaller bool
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged.
@ -32,8 +36,12 @@ type Logger struct {
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
// Function to exit the application, defaults to `os.Exit()`
ExitFunc exitFunc
}
type exitFunc func(int)
type MutexWrap struct {
lock sync.Mutex
disabled bool
@ -69,10 +77,12 @@ func (mw *MutexWrap) Disable() {
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
ExitFunc: os.Exit,
ReportCaller: false,
}
}
@ -121,6 +131,14 @@ func (logger *Logger) WithTime(t time.Time) *Entry {
return entry.WithTime(t)
}
func (logger *Logger) Tracef(format string, args ...interface{}) {
if logger.IsLevelEnabled(TraceLevel) {
entry := logger.newEntry()
entry.Tracef(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
@ -173,7 +191,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
logger.Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
@ -184,6 +202,14 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
}
}
func (logger *Logger) Trace(args ...interface{}) {
if logger.IsLevelEnabled(TraceLevel) {
entry := logger.newEntry()
entry.Trace(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
@ -236,7 +262,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
logger.Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
@ -247,6 +273,14 @@ func (logger *Logger) Panic(args ...interface{}) {
}
}
func (logger *Logger) Traceln(args ...interface{}) {
if logger.IsLevelEnabled(TraceLevel) {
entry := logger.newEntry()
entry.Traceln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
@ -299,7 +333,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
logger.Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
@ -310,6 +344,14 @@ func (logger *Logger) Panicln(args ...interface{}) {
}
}
func (logger *Logger) Exit(code int) {
runHandlers()
if logger.ExitFunc == nil {
logger.ExitFunc = os.Exit
}
logger.ExitFunc(code)
}
// When a file is opened in append mode, it's safe to
// write concurrently to it (for messages under 4k on Linux).
// In these cases the user can choose to disable the lock.
@ -357,6 +399,12 @@ func (logger *Logger) SetOutput(output io.Writer) {
logger.Out = output
}
func (logger *Logger) SetReportCaller(reportCaller bool) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.ReportCaller = reportCaller
}
// ReplaceHooks replaces the logger hooks and returns the old ones
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
logger.mu.Lock()

View file

@ -15,6 +15,8 @@ type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case TraceLevel:
return "trace"
case DebugLevel:
return "debug"
case InfoLevel:
@ -47,12 +49,26 @@ func ParseLevel(lvl string) (Level, error) {
return InfoLevel, nil
case "debug":
return DebugLevel, nil
case "trace":
return TraceLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (level *Level) UnmarshalText(text []byte) error {
l, err := ParseLevel(string(text))
if err != nil {
return err
}
*level = Level(l)
return nil
}
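// Example (illustrative):
//   var level Level
//   _ = level.UnmarshalText([]byte("info")) // level == InfoLevel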
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
@ -61,6 +77,7 @@ var AllLevels = []Level{
WarnLevel,
InfoLevel,
DebugLevel,
TraceLevel,
}
// These are the different logging levels. You can set the logging level to log
@ -69,7 +86,7 @@ const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
@ -82,6 +99,8 @@ const (
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
// TraceLevel level. Designates finer-grained informational events than Debug.
TraceLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
@ -148,3 +167,12 @@ type FieldLogger interface {
// IsFatalEnabled() bool
// IsPanicEnabled() bool
}
// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
// here for consistency. Do not use. Use Logger or Entry instead.
type Ext1FieldLogger interface {
FieldLogger
Tracef(format string, args ...interface{})
Trace(args ...interface{})
Traceln(args ...interface{})
}

View file

@ -1,17 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine,!js
package logrus
import (
"io"
"golang.org/x/sys/unix"
)
const ioctlReadTermios = unix.TIOCGETA
type Termios unix.Termios
func initTerminal(w io.Writer) {
}

View file

@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine,!js
package logrus
import (
"io"
"golang.org/x/sys/unix"
)
const ioctlReadTermios = unix.TCGETS
type Termios unix.Termios
func initTerminal(w io.Writer) {
}

View file

@ -0,0 +1,8 @@
// +build !windows
package logrus
import "io"
func initTerminal(w io.Writer) {
}

View file

@ -107,14 +107,14 @@ func (f *TextFormatter) isColored() bool {
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
prefixFieldClashes(entry.Data, f.FieldMap)
prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller())
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
fixedKeys := make([]string, 0, 3+len(entry.Data))
fixedKeys := make([]string, 0, 4+len(entry.Data))
if !f.DisableTimestamp {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
}
@ -122,6 +122,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
if entry.Message != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
}
if entry.err != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
}
if entry.HasCaller() {
fixedKeys = append(fixedKeys,
f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
}
if !f.DisableSorting {
if f.SortingFunc == nil {
@ -157,13 +164,19 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
} else {
for _, key := range fixedKeys {
var value interface{}
switch key {
case f.FieldMap.resolve(FieldKeyTime):
switch {
case key == f.FieldMap.resolve(FieldKeyTime):
value = entry.Time.Format(timestampFormat)
case f.FieldMap.resolve(FieldKeyLevel):
case key == f.FieldMap.resolve(FieldKeyLevel):
value = entry.Level.String()
case f.FieldMap.resolve(FieldKeyMsg):
case key == f.FieldMap.resolve(FieldKeyMsg):
value = entry.Message
case key == f.FieldMap.resolve(FieldKeyLogrusError):
value = entry.err
case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
value = entry.Caller.Function
case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
default:
value = entry.Data[key]
}
@ -178,7 +191,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
case DebugLevel, TraceLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
@ -197,12 +210,19 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
// the behavior of logrus text_formatter the same as the stdlib log package
entry.Message = strings.TrimSuffix(entry.Message, "\n")
caller := ""
if entry.HasCaller() {
caller = fmt.Sprintf("%s:%d %s()",
entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
}
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
}
for _, k := range keys {
v := entry.Data[k]

View file

@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
var printFunc func(args ...interface{})
switch level {
case TraceLevel:
printFunc = entry.Trace
case DebugLevel:
printFunc = entry.Debug
case InfoLevel:

View file

@ -0,0 +1,36 @@
# Gradle
.gradle
build
gen_gradle
protobuf-gradle-plugin.i*
gradle.properties
local.properties
# Bazel
bazel-*
# Maven
target
# IntelliJ IDEA
.idea
*.iml
# Eclipse
.classpath
.project
.settings
bin
# OS X
.DS_Store
# Emacs
*~
\#*\#
# VS Code
.vscode
# Other
TAGS

View file

@ -0,0 +1,49 @@
sudo: required
dist: trusty
matrix:
include:
- language: java
jdk: oraclejdk8
env: BUILD=BAZEL
- language: java
jdk: oraclejdk8
env: BUILD=GRADLE
before_install:
- case "$BUILD" in
"BAZEL")
echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list ;
curl https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg | sudo apt-key add - ;
sudo apt-get update ;;
esac
install:
- case "$BUILD" in
"BAZEL")
sudo apt-get install bazel ;
bazel version;;
esac
script:
- case "$BUILD" in
"BAZEL")
cd src;
bazel build --show_result=100 ... ;;
"GRADLE")
./gradlew clean assemble --stacktrace ;
./gradlew check --stacktrace ;;
*)
echo "Missing case $BUILD" ;
exit 1 ;;
esac
before_cache:
- rm -f $HOME/.gradle/caches/modules-2/modules-2.lock
cache:
directories:
- $HOME/.gradle
- $HOME/.gradle/caches/
- $HOME/.gradle/wrapper/

View file

@ -0,0 +1 @@
Google Inc.

View file

@ -0,0 +1,26 @@
# How to submit a bug report
If you received an error message, please include it and any exceptions.
We commonly need to know which language you are using (e.g. Java) and what
platform you are on:
* Operating system (e.g., ```uname -a```)
# How to contribute
We definitely welcome patches and contributions to Census! Here are some
guidelines and information about how to do so.
## Before getting started
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
We follow the [Google Proto Style Guide](
https://developers.google.com/protocol-buffers/docs/style).
## Proposing changes
Make sure that `bazel build :all` completes successfully without any new warnings.
Then create a Pull Request with your changes. When the changes are accepted, they
will be merged or cherry-picked by a Census core developer.

View file

@ -0,0 +1,80 @@
OpenCensus Proto - Language Independent Interface Types For OpenCensus
===============================================================
[![Build Status][travis-image]][travis-url]
[![Maven Central][maven-image]][maven-url]
Census provides a framework to define and collect stats against metrics and to
break those stats down across user-defined dimensions.
The Census framework is natively available in many languages (e.g. C++, Go,
and Java). The API interface types are defined using protos to ensure
consistency and interoperability for the different implementations.
## Quickstart
### Install to Go
```bash
$ go get -u github.com/census-instrumentation/opencensus-proto
```
In most cases you should depend on the gen-go files directly. If you are
building with Bazel, there are also go_proto_library build rules available.
See [PR/132](https://github.com/census-instrumentation/opencensus-proto/pull/132)
for details. However, please note that Bazel doesn't generate the final
artifacts.
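For example, a Go program can use the generated types directly (a minimal sketch; the span name is illustrative):

```go
package main

import (
	"fmt"

	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
)

func main() {
	// build a Span message using the generated types
	span := &tracepb.Span{
		Name: &tracepb.TruncatableString{Value: "example-span"},
	}
	fmt.Println(span.GetName().GetValue())
}
```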
### Add the dependencies to your Java project
For Maven add to `pom.xml`:
```xml
<dependency>
<groupId>io.opencensus</groupId>
<artifactId>opencensus-proto</artifactId>
<version>0.0.2</version>
</dependency>
```
For Gradle add to dependencies:
```gradle
compile 'io.opencensus:opencensus-proto:0.0.2'
```
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-proto.svg?branch=master
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-proto
[maven-image]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto/badge.svg
[maven-url]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto
### Add the dependencies to Bazel project
In WORKSPACE, add:
```
git_repository(
name = "io_opencensus_proto",
strip_prefix = "src",
tag = "v0.0.2", # CURRENT_OPENCENSUS_PROTO_VERSION
remote = "https://github.com/census-instrumentation/opencensus-proto",
)
```
or
```
http_archive(
name = "io_opencensus_proto",
strip_prefix = "opencensus-proto-master/src",
urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/master.zip"],
)
```
In BUILD.bazel:
```bazel
proto_library(
name = "foo_proto",
srcs = ["foo.proto"],
deps = [
"@io_opencensus_proto//opencensus/proto/metrics/v1:metrics_proto",
"@io_opencensus_proto//opencensus/proto/trace/v1:trace_proto",
# etc.
],
)
```

View file

@ -0,0 +1,190 @@
# How to Create a Release of OpenCensus Proto (for Maintainers Only)
## Build Environments
We re-generate gen-go files and deploy jars to Maven Central under the following systems:
- Ubuntu 14.04
Other systems may also work, but we haven't verified them.
## Release Go files
To generate the Go files from protos, you'll need to install protoc and the protoc-gen-go plugin first.
Follow the instructions [here](http://google.github.io/proto-lens/installing-protoc.html) and
[here](https://github.com/golang/protobuf#installation).
Then run the following commands to re-generate the gen-go files:
```bash
$ cd $(go env GOPATH)/src/github.com/census-instrumentation/opencensus-proto
$ git checkout -b update-gen-go
$ rm -rf gen-go
$ cd src
$ ./mkgogen.sh
$ git add -A
$ git commit -m "Update gen-go files."
```
Go through PR review and merge the changes to GitHub.
## Tagging the Release
Our release branches follow the naming convention of `v<major>.<minor>.x`, while the tags include the
patch version `v<major>.<minor>.<patch>`. For example, the same branch `v0.4.x` would be used to create
all `v0.4` tags (e.g. `v0.4.0`, `v0.4.1`).
In this section, "upstream repository" refers to the main opencensus-proto GitHub repository.
Before any push to the upstream repository you need to create a [personal access
token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/).
1. Create the release branch and push it to GitHub:
```bash
$ MAJOR=0 MINOR=4 PATCH=0 # Set appropriately for new release
$ VERSION_FILES=(
build.gradle
pom.xml
)
$ git checkout -b v$MAJOR.$MINOR.x master
$ git push upstream v$MAJOR.$MINOR.x
```
2. Enable branch protection for the new branch, if you have admin access.
Otherwise, let someone with admin access know that there is a new release
branch.
- Open the branch protection settings for the new branch, by following
[Github's instructions](https://help.github.com/articles/configuring-protected-branches/).
- Copy the settings from a previous branch, i.e., check
- `Protect this branch`
- `Require pull request reviews before merging`
- `Require status checks to pass before merging`
- `Include administrators`
Enable the following required status checks:
- `cla/google`
- `continuous-integration/travis-ci`
- Uncheck everything else.
- Click "Save changes".
3. For `master` branch:
- Change root build files to the next minor snapshot (e.g.
`0.5.0-SNAPSHOT`).
```bash
$ git checkout -b bump-version master
# Change version to next minor (and keep -SNAPSHOT)
$ sed -i 's/[0-9]\+\.[0-9]\+\.[0-9]\+\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/'$MAJOR.$((MINOR+1)).0'\1/' \
"${VERSION_FILES[@]}"
$ ./gradlew build
$ git commit -a -m "Start $MAJOR.$((MINOR+1)).0 development cycle"
```
- Go through PR review and push the master branch to GitHub:
```bash
$ git checkout master
$ git merge --ff-only bump-version
$ git push upstream master
```
4. For `vMajor.Minor.x` branch:
- Change root build files to remove "-SNAPSHOT" for the next release
version (e.g. `0.4.0`). Commit the result and make a tag:
```bash
$ git checkout -b release v$MAJOR.$MINOR.x
# Change version to remove -SNAPSHOT
$ sed -i 's/-SNAPSHOT\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/\1/' "${VERSION_FILES[@]}"
$ ./gradlew build
$ git commit -a -m "Bump version to $MAJOR.$MINOR.$PATCH"
$ git tag -a v$MAJOR.$MINOR.$PATCH -m "Version $MAJOR.$MINOR.$PATCH"
```
- Change root build files to the next snapshot version (e.g.
`0.4.1-SNAPSHOT`). Commit the result:
```bash
# Change version to next patch and add -SNAPSHOT
$ sed -i 's/[0-9]\+\.[0-9]\+\.[0-9]\+\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/'$MAJOR.$MINOR.$((PATCH+1))-SNAPSHOT'\1/' \
"${VERSION_FILES[@]}"
$ ./gradlew build
$ git commit -a -m "Bump version to $MAJOR.$MINOR.$((PATCH+1))-SNAPSHOT"
```
- Go through PR review and push the release tag and updated release branch
to GitHub:
```bash
$ git checkout v$MAJOR.$MINOR.x
$ git merge --ff-only release
$ git push upstream v$MAJOR.$MINOR.$PATCH
$ git push upstream v$MAJOR.$MINOR.x
```
## Release Java Jar
Deployment to Maven Central (or the snapshot repo) is for all of the artifacts
from the project.
### Prerequisites
If you haven't done already, please follow the instructions
[here](https://github.com/census-instrumentation/opencensus-java/blob/master/RELEASING.md#prerequisites)
to set up the OSSRH (OSS Repository Hosting) account and signing keys. This is required for releasing
to Maven Central.
### Branch
Before building/deploying, be sure to switch to the appropriate tag. The tag
must reference a commit that has been pushed to the main repository, i.e., has
gone through code review. For the current release use:
```bash
$ git checkout -b v$MAJOR.$MINOR.$PATCH tags/v$MAJOR.$MINOR.$PATCH
```
### Initial Deployment
The following command will build the whole project and upload it to Maven
Central. Parallel building [is not safe during
uploadArchives](https://issues.gradle.org/browse/GRADLE-3420).
```bash
$ ./gradlew clean build && ./gradlew -Dorg.gradle.parallel=false uploadArchives
```
If the version has the `-SNAPSHOT` suffix, the artifacts will automatically go
to the snapshot repository. Otherwise it's a release deployment and the
artifacts will go to a staging repository.
When deploying a Release, the deployment will create [a new staging
repository](https://oss.sonatype.org/#stagingRepositories). You'll need to look
up the ID in the OSSRH UI (usually in the form of `opencensus-*`).
### Releasing on Maven Central
Once all of the artifacts have been pushed to the staging repository, the
repository must first be `closed`, which will trigger several sanity checks on
the repository. If this completes successfully, the repository can then be
`released`, which will begin the process of pushing the new artifacts to Maven
Central (the staging repository will be destroyed in the process). You can see
the complete process for releasing to Maven Central on the [OSSRH
site](http://central.sonatype.org/pages/releasing-the-deployment.html).
## Announcement
Once deployment is done, go to Github [release
page](https://github.com/census-instrumentation/opencensus-proto/releases), press
`Draft a new release` to write release notes about the new release.
You can use `git log upstream/v$MAJOR.$((MINOR-1)).x..upstream/v$MAJOR.$MINOR.x --graph --first-parent`
or the Github [compare tool](https://github.com/census-instrumentation/opencensus-proto/compare/)
to view a summary of all commits since last release as a reference.
Please pick major or important user-visible changes only.

View file

@ -0,0 +1,170 @@
description = 'Opencensus Proto'
apply plugin: 'idea'
apply plugin: 'java'
apply plugin: 'com.google.protobuf'
apply plugin: 'maven'
apply plugin: "signing"
group = "io.opencensus"
version = "0.1.0" // CURRENT_OPENCENSUS_PROTO_VERSION
sourceCompatibility = 1.6
targetCompatibility = 1.6
repositories {
maven { url "https://plugins.gradle.org/m2/" }
}
jar.manifest {
attributes('Implementation-Title': name,
'Implementation-Version': version,
'Built-By': System.getProperty('user.name'),
'Built-JDK': System.getProperty('java.version'),
'Source-Compatibility': sourceCompatibility,
'Target-Compatibility': targetCompatibility)
}
def protobufVersion = '3.5.1'
def protocVersion = '3.5.1'
def grpcVersion = "1.14.0" // CURRENT_GRPC_VERSION
buildscript {
repositories {
maven { url "https://plugins.gradle.org/m2/" }
}
dependencies {
classpath "com.google.protobuf:protobuf-gradle-plugin:0.8.6"
}
}
sourceSets {
main {
proto {
srcDir 'src'
}
}
}
dependencies {
compile "com.google.protobuf:protobuf-java:${protobufVersion}",
"io.grpc:grpc-protobuf:${grpcVersion}",
"io.grpc:grpc-stub:${grpcVersion}"
compileOnly "javax.annotation:javax.annotation-api:1.2"
}
protobuf {
protoc {
// The artifact spec for the Protobuf Compiler
artifact = "com.google.protobuf:protoc:${protocVersion}"
}
plugins {
grpc {
artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}"
}
}
generateProtoTasks {
all()*.plugins {
grpc {}
}
ofSourceSet('main')
}
generatedFilesBaseDir = "$projectDir/gen_gradle/src"
}
// Disable all java warnings for proto generated files build
compileJava {
options.compilerArgs += ["-Xlint:none"]
options.encoding = "UTF-8"
}
clean {
delete protobuf.generatedFilesBaseDir
}
// IntelliJ complains that the generated classes are not found; ask IntelliJ to include the
// generated Java directories as source folders.
idea {
module {
sourceDirs += file("${protobuf.generatedFilesBaseDir}/main/java");
// If you have additional sourceSets and/or codegen plugins, add all of them
}
}
signing {
required false
sign configurations.archives
}
javadoc.source = "$projectDir/gen_gradle/src"
javadoc.options {
encoding = 'UTF-8'
links 'https://docs.oracle.com/javase/8/docs/api/'
}
task javadocJar(type: Jar) {
classifier = 'javadoc'
from javadoc
}
task sourcesJar(type: Jar) {
classifier = 'sources'
from sourceSets.main.allSource
}
artifacts {
archives javadocJar, sourcesJar
}
uploadArchives {
repositories {
mavenDeployer {
beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }
def configureAuth = {
if (rootProject.hasProperty('ossrhUsername') && rootProject.hasProperty('ossrhPassword')) {
authentication(userName:rootProject.ossrhUsername, password: rootProject.ossrhPassword)
}
}
repository(url: "https://oss.sonatype.org/service/local/staging/deploy/maven2/", configureAuth)
snapshotRepository(url: "https://oss.sonatype.org/content/repositories/snapshots/", configureAuth)
pom.project {
name "OpenCensus"
packaging 'jar'
description project.description
url 'https://github.com/census-instrumentation/opencensus-proto'
scm {
connection 'scm:svn:https://github.com/census-instrumentation/opencensus-proto'
developerConnection 'scm:git:git@github.com/census-instrumentation/opencensus-proto'
url 'https://github.com/census-instrumentation/opencensus-proto'
}
licenses {
license {
name 'The Apache License, Version 2.0'
url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
}
}
developers {
developer {
id 'io.opencensus'
name 'OpenCensus Contributors'
email 'census-developers@googlegroups.com'
url 'opencensus.io'
// https://issues.gradle.org/browse/GRADLE-2719
organization = 'OpenCensus Authors'
organizationUrl 'https://www.opencensus.io'
}
}
}
}
}
}

View file

@ -0,0 +1,356 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/agent/common/v1/common.proto
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type LibraryInfo_Language int32
const (
LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
LibraryInfo_CPP LibraryInfo_Language = 1
LibraryInfo_C_SHARP LibraryInfo_Language = 2
LibraryInfo_ERLANG LibraryInfo_Language = 3
LibraryInfo_GO_LANG LibraryInfo_Language = 4
LibraryInfo_JAVA LibraryInfo_Language = 5
LibraryInfo_NODE_JS LibraryInfo_Language = 6
LibraryInfo_PHP LibraryInfo_Language = 7
LibraryInfo_PYTHON LibraryInfo_Language = 8
LibraryInfo_RUBY LibraryInfo_Language = 9
)
var LibraryInfo_Language_name = map[int32]string{
0: "LANGUAGE_UNSPECIFIED",
1: "CPP",
2: "C_SHARP",
3: "ERLANG",
4: "GO_LANG",
5: "JAVA",
6: "NODE_JS",
7: "PHP",
8: "PYTHON",
9: "RUBY",
}
var LibraryInfo_Language_value = map[string]int32{
"LANGUAGE_UNSPECIFIED": 0,
"CPP": 1,
"C_SHARP": 2,
"ERLANG": 3,
"GO_LANG": 4,
"JAVA": 5,
"NODE_JS": 6,
"PHP": 7,
"PYTHON": 8,
"RUBY": 9,
}
func (x LibraryInfo_Language) String() string {
return proto.EnumName(LibraryInfo_Language_name, int32(x))
}
func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{2, 0}
}
// Identifier metadata of the Node (Application instrumented with OpenCensus)
// that connects to OpenCensus Agent.
// In the future we plan to extend the identifier proto definition to support
// additional information (e.g cloud id, etc.)
type Node struct {
// Identifier that uniquely identifies a process within a VM/container.
Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
// Information on the OpenCensus Library that initiates the stream.
LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
// Additional information on service.
ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
// Additional attributes.
Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Node) Reset() { *m = Node{} }
func (m *Node) String() string { return proto.CompactTextString(m) }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{0}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Node.Unmarshal(m, b)
}
func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Node.Marshal(b, m, deterministic)
}
func (m *Node) XXX_Merge(src proto.Message) {
xxx_messageInfo_Node.Merge(m, src)
}
func (m *Node) XXX_Size() int {
return xxx_messageInfo_Node.Size(m)
}
func (m *Node) XXX_DiscardUnknown() {
xxx_messageInfo_Node.DiscardUnknown(m)
}
var xxx_messageInfo_Node proto.InternalMessageInfo
func (m *Node) GetIdentifier() *ProcessIdentifier {
if m != nil {
return m.Identifier
}
return nil
}
func (m *Node) GetLibraryInfo() *LibraryInfo {
if m != nil {
return m.LibraryInfo
}
return nil
}
func (m *Node) GetServiceInfo() *ServiceInfo {
if m != nil {
return m.ServiceInfo
}
return nil
}
func (m *Node) GetAttributes() map[string]string {
if m != nil {
return m.Attributes
}
return nil
}
// Identifier that uniquely identifies a process within a VM/container.
type ProcessIdentifier struct {
// The host name. Usually refers to the machine/container name.
// For example: os.Hostname() in Go, socket.gethostname() in Python.
HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
// Process id.
Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
// Start time of this ProcessIdentifier. Represented in epoch time.
StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }
func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }
func (*ProcessIdentifier) ProtoMessage() {}
func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{1}
}
func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
}
func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
}
func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProcessIdentifier.Merge(m, src)
}
func (m *ProcessIdentifier) XXX_Size() int {
return xxx_messageInfo_ProcessIdentifier.Size(m)
}
func (m *ProcessIdentifier) XXX_DiscardUnknown() {
xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
}
var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo
func (m *ProcessIdentifier) GetHostName() string {
if m != nil {
return m.HostName
}
return ""
}
func (m *ProcessIdentifier) GetPid() uint32 {
if m != nil {
return m.Pid
}
return 0
}
func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
if m != nil {
return m.StartTimestamp
}
return nil
}
// Information on OpenCensus Library.
type LibraryInfo struct {
// Language of OpenCensus Library.
Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
// Version of Agent exporter of Library.
ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
// Version of OpenCensus Library.
CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }
func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }
func (*LibraryInfo) ProtoMessage() {}
func (*LibraryInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{2}
}
func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
}
func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
}
func (m *LibraryInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_LibraryInfo.Merge(m, src)
}
func (m *LibraryInfo) XXX_Size() int {
return xxx_messageInfo_LibraryInfo.Size(m)
}
func (m *LibraryInfo) XXX_DiscardUnknown() {
xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
}
var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo
func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
if m != nil {
return m.Language
}
return LibraryInfo_LANGUAGE_UNSPECIFIED
}
func (m *LibraryInfo) GetExporterVersion() string {
if m != nil {
return m.ExporterVersion
}
return ""
}
func (m *LibraryInfo) GetCoreLibraryVersion() string {
if m != nil {
return m.CoreLibraryVersion
}
return ""
}
// Additional service information.
type ServiceInfo struct {
// Name of the service.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }
func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }
func (*ServiceInfo) ProtoMessage() {}
func (*ServiceInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{3}
}
func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
}
func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
}
func (m *ServiceInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServiceInfo.Merge(m, src)
}
func (m *ServiceInfo) XXX_Size() int {
return xxx_messageInfo_ServiceInfo.Size(m)
}
func (m *ServiceInfo) XXX_DiscardUnknown() {
xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
}
var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo
func (m *ServiceInfo) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func init() {
proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
}
func init() {
proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
}
var fileDescriptor_126c72ed8a252c84 = []byte{
// 590 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e,
0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee,
0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01,
0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde,
0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9,
0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2,
0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89,
0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9,
0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7,
0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39,
0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b,
0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b,
0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13,
0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06,
0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d,
0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67,
0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2,
0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a,
0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a,
0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76,
0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23,
0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c,
0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c,
0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92,
0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e,
0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51,
0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14,
0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83,
0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0,
0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86,
0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4,
0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd,
0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9,
0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c,
0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70,
0x85, 0xec, 0xd8, 0x9f, 0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1,
0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00,
}


@@ -0,0 +1,443 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/agent/trace/v1/trace_service.proto
package v1
import (
fmt "fmt"
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
proto "github.com/golang/protobuf/proto"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type CurrentLibraryConfig struct {
// This is required only in the first message on the stream or if the
// previously sent CurrentLibraryConfig message has a different Node (e.g.
// when the same RPC is used to configure multiple Applications).
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
// Current configuration.
Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} }
func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) }
func (*CurrentLibraryConfig) ProtoMessage() {}
func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_7027f99caf7ac6a5, []int{0}
}
func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b)
}
func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic)
}
func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_CurrentLibraryConfig.Merge(m, src)
}
func (m *CurrentLibraryConfig) XXX_Size() int {
return xxx_messageInfo_CurrentLibraryConfig.Size(m)
}
func (m *CurrentLibraryConfig) XXX_DiscardUnknown() {
xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m)
}
var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo
func (m *CurrentLibraryConfig) GetNode() *v1.Node {
if m != nil {
return m.Node
}
return nil
}
func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
if m != nil {
return m.Config
}
return nil
}
type UpdatedLibraryConfig struct {
// This field is ignored when the RPC is used to configure only one Application.
// This is required only in the first message on the stream or if the
// previously sent UpdatedLibraryConfig message has a different Node.
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
// Requested updated configuration.
Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} }
func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) }
func (*UpdatedLibraryConfig) ProtoMessage() {}
func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_7027f99caf7ac6a5, []int{1}
}
func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b)
}
func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic)
}
func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src)
}
func (m *UpdatedLibraryConfig) XXX_Size() int {
return xxx_messageInfo_UpdatedLibraryConfig.Size(m)
}
func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() {
xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m)
}
var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo
func (m *UpdatedLibraryConfig) GetNode() *v1.Node {
if m != nil {
return m.Node
}
return nil
}
func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
if m != nil {
return m.Config
}
return nil
}
type ExportTraceServiceRequest struct {
// This is required only in the first message on the stream or if the
// previously sent ExportTraceServiceRequest message has a different Node (e.g.
// when the same RPC is used to send Spans from multiple Applications).
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
// A list of Spans that belong to the last received Node.
Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
// The resource for the spans in this message that do not have an explicit
// resource set.
// If unset, the most recently set resource in the RPC stream applies. It is
// valid to never be set within a stream, e.g. when no resource info is known.
Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
func (*ExportTraceServiceRequest) ProtoMessage() {}
func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_7027f99caf7ac6a5, []int{2}
}
func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b)
}
func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
}
func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
}
func (m *ExportTraceServiceRequest) XXX_Size() int {
return xxx_messageInfo_ExportTraceServiceRequest.Size(m)
}
func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
func (m *ExportTraceServiceRequest) GetNode() *v1.Node {
if m != nil {
return m.Node
}
return nil
}
func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span {
if m != nil {
return m.Spans
}
return nil
}
func (m *ExportTraceServiceRequest) GetResource() *v12.Resource {
if m != nil {
return m.Resource
}
return nil
}
type ExportTraceServiceResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
func (*ExportTraceServiceResponse) ProtoMessage() {}
func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_7027f99caf7ac6a5, []int{3}
}
func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b)
}
func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
}
func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
}
func (m *ExportTraceServiceResponse) XXX_Size() int {
return xxx_messageInfo_ExportTraceServiceResponse.Size(m)
}
func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig")
proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig")
proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest")
proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse")
}
func init() {
proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5)
}
var fileDescriptor_7027f99caf7ac6a5 = []byte{
// 423 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40,
0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a,
0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8,
0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3,
0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d,
0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49,
0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91,
0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d,
0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7,
0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf,
0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73,
0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1,
0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31,
0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95,
0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d,
0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90,
0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d,
0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d,
0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89,
0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44,
0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e,
0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae,
0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59,
0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94,
0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00,
0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf,
0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// TraceServiceClient is the client API for TraceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type TraceServiceClient interface {
// After initialization, this RPC must be kept alive for the entire life of
// the application. The agent pushes configs down to applications via a
// stream.
Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
}
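// exampleExport is an illustrative sketch, not part of the generated file:
// it dials an agent, opens the Export stream declared above, and sends one
// batch. The address (the conventional ocagent port is assumed) and the
// empty Node/Span values are placeholders for this example only; v1 and v11
// refer to the agent/common/v1 and trace/v1 packages imported at the top of
// this file.
func exampleExport() error {
	conn, err := grpc.Dial("localhost:55678", grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()
	client := NewTraceServiceClient(conn)
	stream, err := client.Export(context.Background())
	if err != nil {
		return err
	}
	// The first message on the stream must carry the Node identifying the
	// exporting process; later messages may omit it.
	return stream.Send(&ExportTraceServiceRequest{
		Node:  &v1.Node{},
		Spans: []*v11.Span{},
	})
}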
type traceServiceClient struct {
cc *grpc.ClientConn
}
func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
return &traceServiceClient{cc}
}
func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
if err != nil {
return nil, err
}
x := &traceServiceConfigClient{stream}
return x, nil
}
type TraceService_ConfigClient interface {
Send(*CurrentLibraryConfig) error
Recv() (*UpdatedLibraryConfig, error)
grpc.ClientStream
}
type traceServiceConfigClient struct {
grpc.ClientStream
}
func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
return x.ClientStream.SendMsg(m)
}
func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
m := new(UpdatedLibraryConfig)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
if err != nil {
return nil, err
}
x := &traceServiceExportClient{stream}
return x, nil
}
type TraceService_ExportClient interface {
Send(*ExportTraceServiceRequest) error
Recv() (*ExportTraceServiceResponse, error)
grpc.ClientStream
}
type traceServiceExportClient struct {
grpc.ClientStream
}
func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
m := new(ExportTraceServiceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// TraceServiceServer is the server API for TraceService service.
type TraceServiceServer interface {
// After initialization, this RPC must be kept alive for the entire life of
// the application. The agent pushes configs down to applications via a
// stream.
Config(TraceService_ConfigServer) error
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(TraceService_ExportServer) error
}
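// exampleAgentServer is an illustrative sketch, not part of the generated
// file: a minimal TraceServiceServer that echoes each received config back
// unchanged and acknowledges every exported batch. A real agent would
// inspect the spans and push genuinely updated configs. It would be wired
// in with RegisterTraceServiceServer (defined below) on a *grpc.Server.
type exampleAgentServer struct{}

func (*exampleAgentServer) Config(stream TraceService_ConfigServer) error {
	for {
		cur, err := stream.Recv() // CurrentLibraryConfig from the application
		if err != nil {
			return err
		}
		// Echo the received config back as the "updated" one.
		if err := stream.Send(&UpdatedLibraryConfig{Node: cur.Node, Config: cur.Config}); err != nil {
			return err
		}
	}
}

func (*exampleAgentServer) Export(stream TraceService_ExportServer) error {
	for {
		if _, err := stream.Recv(); err != nil {
			return err
		}
		// The response message carries no fields; sending one simply
		// acknowledges the batch.
		if err := stream.Send(&ExportTraceServiceResponse{}); err != nil {
			return err
		}
	}
}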
func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
s.RegisterService(&_TraceService_serviceDesc, srv)
}
func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
}
type TraceService_ConfigServer interface {
Send(*UpdatedLibraryConfig) error
Recv() (*CurrentLibraryConfig, error)
grpc.ServerStream
}
type traceServiceConfigServer struct {
grpc.ServerStream
}
func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
return x.ServerStream.SendMsg(m)
}
func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
m := new(CurrentLibraryConfig)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
}
type TraceService_ExportServer interface {
Send(*ExportTraceServiceResponse) error
Recv() (*ExportTraceServiceRequest, error)
grpc.ServerStream
}
type traceServiceExportServer struct {
grpc.ServerStream
}
func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
m := new(ExportTraceServiceRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _TraceService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
HandlerType: (*TraceServiceServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Config",
Handler: _TraceService_Config_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "Export",
Handler: _TraceService_Export_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
}


@@ -0,0 +1,99 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/resource/v1/resource.proto
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Resource information.
type Resource struct {
// Type identifier for the resource.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Set of labels that describe the resource.
Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Resource) Reset() { *m = Resource{} }
func (m *Resource) String() string { return proto.CompactTextString(m) }
func (*Resource) ProtoMessage() {}
func (*Resource) Descriptor() ([]byte, []int) {
return fileDescriptor_584700775a2fc762, []int{0}
}
func (m *Resource) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Resource.Unmarshal(m, b)
}
func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
}
func (m *Resource) XXX_Merge(src proto.Message) {
xxx_messageInfo_Resource.Merge(m, src)
}
func (m *Resource) XXX_Size() int {
return xxx_messageInfo_Resource.Size(m)
}
func (m *Resource) XXX_DiscardUnknown() {
xxx_messageInfo_Resource.DiscardUnknown(m)
}
var xxx_messageInfo_Resource proto.InternalMessageInfo
func (m *Resource) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *Resource) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
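// exampleResource is an illustrative sketch, not part of the generated file:
// a Resource pairs a type identifier with a free-form label map. The
// "container" type and "pod" label are invented values for this example.
func exampleResource() *Resource {
	return &Resource{
		Type:   "container",
		Labels: map[string]string{"pod": "example-pod"},
	}
}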
func init() {
proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
}
func init() {
proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
}
var fileDescriptor_584700775a2fc762 = []byte{
// 234 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf,
0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19,
0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5,
0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99,
0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00,
0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00,
}

File diff suppressed because it is too large


@@ -0,0 +1,406 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/trace/v1/trace_config.proto
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Global configuration of the trace service.
type TraceConfig struct {
// The global default sampler used to make decisions on span sampling.
//
// Types that are valid to be assigned to Sampler:
// *TraceConfig_ProbabilitySampler
// *TraceConfig_ConstantSampler
// *TraceConfig_RateLimitingSampler
Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"`
// The global default max number of attributes per span.
MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"`
// The global default max number of annotation events per span.
MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"`
// The global default max number of message events per span.
MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"`
// The global default max number of link entries per span.
MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TraceConfig) Reset() { *m = TraceConfig{} }
func (m *TraceConfig) String() string { return proto.CompactTextString(m) }
func (*TraceConfig) ProtoMessage() {}
func (*TraceConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_5359209b41ff50c5, []int{0}
}
func (m *TraceConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TraceConfig.Unmarshal(m, b)
}
func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic)
}
func (m *TraceConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_TraceConfig.Merge(m, src)
}
func (m *TraceConfig) XXX_Size() int {
return xxx_messageInfo_TraceConfig.Size(m)
}
func (m *TraceConfig) XXX_DiscardUnknown() {
xxx_messageInfo_TraceConfig.DiscardUnknown(m)
}
var xxx_messageInfo_TraceConfig proto.InternalMessageInfo
type isTraceConfig_Sampler interface {
isTraceConfig_Sampler()
}
type TraceConfig_ProbabilitySampler struct {
ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"`
}
type TraceConfig_ConstantSampler struct {
ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"`
}
type TraceConfig_RateLimitingSampler struct {
RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"`
}
func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {}
func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {}
func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {}
func (m *TraceConfig) GetSampler() isTraceConfig_Sampler {
if m != nil {
return m.Sampler
}
return nil
}
func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler {
if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok {
return x.ProbabilitySampler
}
return nil
}
func (m *TraceConfig) GetConstantSampler() *ConstantSampler {
if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok {
return x.ConstantSampler
}
return nil
}
func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler {
if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok {
return x.RateLimitingSampler
}
return nil
}
func (m *TraceConfig) GetMaxNumberOfAttributes() int64 {
if m != nil {
return m.MaxNumberOfAttributes
}
return 0
}
func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 {
if m != nil {
return m.MaxNumberOfAnnotations
}
return 0
}
func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 {
if m != nil {
return m.MaxNumberOfMessageEvents
}
return 0
}
func (m *TraceConfig) GetMaxNumberOfLinks() int64 {
if m != nil {
return m.MaxNumberOfLinks
}
return 0
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*TraceConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _TraceConfig_OneofMarshaler, _TraceConfig_OneofUnmarshaler, _TraceConfig_OneofSizer, []interface{}{
(*TraceConfig_ProbabilitySampler)(nil),
(*TraceConfig_ConstantSampler)(nil),
(*TraceConfig_RateLimitingSampler)(nil),
}
}
func _TraceConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*TraceConfig)
// sampler
switch x := m.Sampler.(type) {
case *TraceConfig_ProbabilitySampler:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ProbabilitySampler); err != nil {
return err
}
case *TraceConfig_ConstantSampler:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ConstantSampler); err != nil {
return err
}
case *TraceConfig_RateLimitingSampler:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.RateLimitingSampler); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("TraceConfig.Sampler has unexpected type %T", x)
}
return nil
}
func _TraceConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*TraceConfig)
switch tag {
case 1: // sampler.probability_sampler
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ProbabilitySampler)
err := b.DecodeMessage(msg)
m.Sampler = &TraceConfig_ProbabilitySampler{msg}
return true, err
case 2: // sampler.constant_sampler
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ConstantSampler)
err := b.DecodeMessage(msg)
m.Sampler = &TraceConfig_ConstantSampler{msg}
return true, err
case 3: // sampler.rate_limiting_sampler
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(RateLimitingSampler)
err := b.DecodeMessage(msg)
m.Sampler = &TraceConfig_RateLimitingSampler{msg}
return true, err
default:
return false, nil
}
}
func _TraceConfig_OneofSizer(msg proto.Message) (n int) {
m := msg.(*TraceConfig)
// sampler
switch x := m.Sampler.(type) {
case *TraceConfig_ProbabilitySampler:
s := proto.Size(x.ProbabilitySampler)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *TraceConfig_ConstantSampler:
s := proto.Size(x.ConstantSampler)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *TraceConfig_RateLimitingSampler:
s := proto.Size(x.RateLimitingSampler)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// Sampler that tries to uniformly sample traces with a given probability.
// The probability of sampling a trace is equal to the specified probability.
type ProbabilitySampler struct {
// The desired probability of sampling. Must be within [0.0, 1.0].
SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} }
func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) }
func (*ProbabilitySampler) ProtoMessage() {}
func (*ProbabilitySampler) Descriptor() ([]byte, []int) {
return fileDescriptor_5359209b41ff50c5, []int{1}
}
func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b)
}
func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic)
}
func (m *ProbabilitySampler) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProbabilitySampler.Merge(m, src)
}
func (m *ProbabilitySampler) XXX_Size() int {
return xxx_messageInfo_ProbabilitySampler.Size(m)
}
func (m *ProbabilitySampler) XXX_DiscardUnknown() {
xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m)
}
var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo
func (m *ProbabilitySampler) GetSamplingProbability() float64 {
if m != nil {
return m.SamplingProbability
}
return 0
}
// Sampler that makes a constant decision (either always "yes" or always "no")
// on span sampling.
type ConstantSampler struct {
// Whether spans should always be sampled, or never sampled.
Decision bool `protobuf:"varint,1,opt,name=decision,proto3" json:"decision,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ConstantSampler) Reset() { *m = ConstantSampler{} }
func (m *ConstantSampler) String() string { return proto.CompactTextString(m) }
func (*ConstantSampler) ProtoMessage() {}
func (*ConstantSampler) Descriptor() ([]byte, []int) {
return fileDescriptor_5359209b41ff50c5, []int{2}
}
func (m *ConstantSampler) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ConstantSampler.Unmarshal(m, b)
}
func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic)
}
func (m *ConstantSampler) XXX_Merge(src proto.Message) {
xxx_messageInfo_ConstantSampler.Merge(m, src)
}
func (m *ConstantSampler) XXX_Size() int {
return xxx_messageInfo_ConstantSampler.Size(m)
}
func (m *ConstantSampler) XXX_DiscardUnknown() {
xxx_messageInfo_ConstantSampler.DiscardUnknown(m)
}
var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo
func (m *ConstantSampler) GetDecision() bool {
if m != nil {
return m.Decision
}
return false
}
// Sampler that tries to sample with a rate per time window.
type RateLimitingSampler struct {
// Rate per second.
Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} }
func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) }
func (*RateLimitingSampler) ProtoMessage() {}
func (*RateLimitingSampler) Descriptor() ([]byte, []int) {
return fileDescriptor_5359209b41ff50c5, []int{3}
}
func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b)
}
func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic)
}
func (m *RateLimitingSampler) XXX_Merge(src proto.Message) {
xxx_messageInfo_RateLimitingSampler.Merge(m, src)
}
func (m *RateLimitingSampler) XXX_Size() int {
return xxx_messageInfo_RateLimitingSampler.Size(m)
}
func (m *RateLimitingSampler) XXX_DiscardUnknown() {
xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m)
}
var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo
func (m *RateLimitingSampler) GetQps() int64 {
if m != nil {
return m.Qps
}
return 0
}
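// exampleTraceConfig is an illustrative sketch, not part of the generated
// file: it shows how the Sampler oneof is populated through its generated
// wrapper struct, here selecting the probability sampler. The 0.25
// probability and the 32-attribute limit are arbitrary example values;
// calling GetProbabilitySampler on the result returns the inner sampler
// because that branch of the oneof is set.
func exampleTraceConfig() *TraceConfig {
	return &TraceConfig{
		Sampler: &TraceConfig_ProbabilitySampler{
			ProbabilitySampler: &ProbabilitySampler{SamplingProbability: 0.25},
		},
		MaxNumberOfAttributes: 32,
	}
}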
func init() {
proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig")
proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler")
proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler")
proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler")
}
func init() {
proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5)
}
var fileDescriptor_5359209b41ff50c5 = []byte{
// 431 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x41, 0x6f, 0xd4, 0x30,
0x10, 0x85, 0x09, 0x5b, 0xda, 0x32, 0x3d, 0x74, 0xe5, 0xa8, 0x28, 0x45, 0x3d, 0x54, 0x7b, 0xa1,
0x42, 0x24, 0xa1, 0x70, 0x40, 0x5c, 0x90, 0xd8, 0x0a, 0xc4, 0xa1, 0xc0, 0x2a, 0x20, 0x21, 0x71,
0x09, 0x4e, 0xea, 0x0d, 0x16, 0xf1, 0x38, 0xd8, 0x93, 0xa8, 0xfc, 0x17, 0x7e, 0x6c, 0x15, 0x67,
0x95, 0x64, 0xbb, 0xed, 0xde, 0xec, 0xf7, 0xde, 0xf7, 0x2c, 0x79, 0x6c, 0x78, 0xa1, 0x2b, 0x81,
0xb9, 0x40, 0x5b, 0xdb, 0xb8, 0x32, 0x9a, 0x74, 0x4c, 0x86, 0xe7, 0x22, 0x6e, 0xce, 0xbb, 0x45,
0x9a, 0x6b, 0x5c, 0xca, 0x22, 0x72, 0x1e, 0x3b, 0x1e, 0xd2, 0x9d, 0x12, 0xb9, 0x50, 0xd4, 0x9c,
0xcf, 0xfe, 0xef, 0xc0, 0xc1, 0xf7, 0x76, 0x73, 0xe1, 0x00, 0xf6, 0x0b, 0xfc, 0xca, 0xe8, 0x8c,
0x67, 0xb2, 0x94, 0xf4, 0x2f, 0xb5, 0x5c, 0x55, 0xa5, 0x30, 0x81, 0x77, 0xea, 0x9d, 0x1d, 0xbc,
0x0a, 0xa3, 0x7b, 0x8b, 0xa2, 0xc5, 0x40, 0x7d, 0xeb, 0xa0, 0x4f, 0x0f, 0x12, 0x56, 0x6d, 0xa8,
0xec, 0x07, 0x4c, 0x73, 0x8d, 0x96, 0x38, 0x52, 0x5f, 0xff, 0xd0, 0xd5, 0x3f, 0xdf, 0x52, 0x7f,
0xb1, 0x42, 0x86, 0xee, 0xc3, 0x7c, 0x5d, 0x62, 0x57, 0x70, 0x64, 0x38, 0x89, 0xb4, 0x94, 0x4a,
0x92, 0xc4, 0xa2, 0x6f, 0x9f, 0xb8, 0xf6, 0x68, 0x4b, 0x7b, 0xc2, 0x49, 0x5c, 0xae, 0xb0, 0xe1,
0x04, 0xdf, 0x6c, 0xca, 0xec, 0x0d, 0x04, 0x8a, 0x5f, 0xa7, 0x58, 0xab, 0x4c, 0x98, 0x54, 0x2f,
0x53, 0x4e, 0x64, 0x64, 0x56, 0x93, 0xb0, 0xc1, 0xce, 0xa9, 0x77, 0x36, 0x49, 0x8e, 0x14, 0xbf,
0xfe, 0xe2, 0xec, 0xaf, 0xcb, 0xf7, 0xbd, 0xc9, 0xde, 0xc2, 0xf1, 0x2d, 0x10, 0x51, 0x13, 0x27,
0xa9, 0xd1, 0x06, 0x8f, 0x1c, 0xf9, 0x64, 0x4c, 0x0e, 0x2e, 0x7b, 0x07, 0x27, 0xeb, 0xa8, 0x12,
0xd6, 0xf2, 0x42, 0xa4, 0xa2, 0x11, 0x48, 0x36, 0xd8, 0x75, 0x74, 0x30, 0xa2, 0x3f, 0x77, 0x81,
0x0f, 0xce, 0x67, 0x21, 0xf8, 0xeb, 0x7c, 0x29, 0xf1, 0x8f, 0x0d, 0xf6, 0x1c, 0x36, 0x1d, 0x61,
0x97, 0xad, 0x3e, 0x7f, 0x0c, 0x7b, 0xab, 0xab, 0x9b, 0x7d, 0x04, 0xb6, 0x39, 0x58, 0xf6, 0x12,
0x7c, 0x17, 0x90, 0x58, 0x8c, 0x5c, 0xf7, 0x48, 0xbc, 0xe4, 0x2e, 0x6b, 0x16, 0xc2, 0xe1, 0xad,
0x09, 0xb2, 0xa7, 0xb0, 0x7f, 0x25, 0x72, 0x69, 0xa5, 0x46, 0x47, 0xee, 0x27, 0xfd, 0x7e, 0xf6,
0x0c, 0xfc, 0x3b, 0x46, 0xc2, 0xa6, 0x30, 0xf9, 0x5b, 0x59, 0x97, 0x9e, 0x24, 0xed, 0x72, 0xde,
0xc0, 0x89, 0xd4, 0xf7, 0x0f, 0x76, 0x3e, 0x1d, 0xbd, 0xed, 0x45, 0x6b, 0x2d, 0xbc, 0x9f, 0xf3,
0x42, 0xd2, 0xef, 0x3a, 0x8b, 0x72, 0xad, 0xe2, 0x8e, 0x0a, 0x25, 0x5a, 0x32, 0xb5, 0x12, 0xd8,
0xdd, 0x78, 0x3c, 0x14, 0x86, 0xdd, 0xef, 0x2a, 0x04, 0x86, 0xc5, 0xf0, 0xc9, 0xb2, 0x5d, 0x27,
0xbf, 0xbe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x98, 0xa1, 0x9e, 0x88, 0x03, 0x00, 0x00,
}


@@ -0,0 +1,172 @@
#!/usr/bin/env sh
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn () {
echo "$*"
}
die () {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Escape application args
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=$(save "$@")
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
cd "$(dirname "$0")"
fi
exec "$JAVACMD" "$@"


@@ -0,0 +1,84 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega


@@ -0,0 +1 @@
rootProject.name = "opencensus-proto"


@@ -1,37 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# never check in from the bin directory (for now)
bin/*
# Test key files
*.pem
# Cover profiles
*.out
# Editor/IDE specific files.
*.sublime-project
*.sublime-workspace


@@ -1,18 +0,0 @@
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>


@@ -1,182 +0,0 @@
a-palchikov <deemok@gmail.com>
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Schlesinger <aschlesinger@deis.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Duke <adam.v.duke@gmail.com>
Adam Enger <adamenger@gmail.com>
Adrian Mouat <adrian.mouat@gmail.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
Alexey Gladkov <gladkov.alexey@gmail.com>
allencloud <allen.sun@daocloud.io>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andrew Hsu <andrewhsu@acm.org>
Andrew Meredith <andymeredith@gmail.com>
Andrew T Nguyen <andrew.nguyen@docker.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Anis Elleuch <vadmeste@gmail.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <runcom@redhat.com>
Anusha Ragunathan <anusha@docker.com>
Arien Holthuizen <aholthuizen@schubergphilis.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Asuka Suzuki <hello@tanksuzuki.com>
Avi Miller <avi.miller@oracle.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
Ben Bodenmiller <bbodenmiller@hotmail.com>
Ben Firshman <ben@firshman.co.uk>
bin liu <liubin0329@gmail.com>
Brian Bland <brian.bland@docker.com>
burnettk <burnettk@gmail.com>
Carson A <ca@carsonoid.net>
Cezar Sa Espinola <cezarsa@gmail.com>
Charles Smith <charles.smith@docker.com>
Chris Dillon <squarism@gmail.com>
cuiwei13 <cuiwei13@pku.edu.cn>
cyli <cyli@twistedmatrix.com>
Daisuke Fujita <dtanshi45@gmail.com>
Daniel Huhn <daniel@danielhuhn.de>
Darren Shepherd <darren@rancher.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Lawrence <david.lawrence@docker.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
Edgar Lee <edgar.lee@docker.com>
Eric Yang <windfarer@gmail.com>
Fabio Berchtold <jamesclonk@jamesclonk.ch>
Fabio Huser <fabio@fh1.ch>
farmerworking <farmerworking@gmail.com>
Felix Yan <felixonmars@archlinux.org>
Florentin Raud <florentin.raud@gmail.com>
Frank Chen <frankchn@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
gabriell nascimento <gabriell@bluesoft.com.br>
Gleb Schukin <gschukin@ptsecurity.com>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
HuKeping <hukeping@huawei.com>
Ian Babrou <ibobrik@gmail.com>
igayoso <igayoso@gmail.com>
Jack Griffin <jackpg14@gmail.com>
James Findley <jfindley@fastmail.com>
Jason Freidman <jason.freidman@gmail.com>
Jason Heiss <jheiss@aput.net>
Jeff Nickoloff <jeff@allingeek.com>
Jess Frazelle <acidburn@google.com>
Jessie Frazelle <jessie@docker.com>
jhaohai <jhaohai@foxmail.com>
Jianqing Wang <tsing@jianqing.org>
Jihoon Chung <jihoon@gmail.com>
Joao Fernandes <joao.fernandes@docker.com>
John Mulhausen <john@docker.com>
John Starks <jostarks@microsoft.com>
Jon Johnson <jonjohnson@google.com>
Jon Poler <jonathan.poler@apcera.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jordan Liggitt <jliggitt@redhat.com>
Josh Chorlton <josh.chorlton@docker.com>
Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Ke Xu <leonhartx.k@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Li Yi <denverdino@gmail.com>
Liu Hua <sdu.liu@huawei.com>
liuchang0812 <liuchang0812@gmail.com>
Lloyd Ramey <lnr0626@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Marcus Martins <marcus@docker.com>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Matthew Green <greenmr@live.co.uk>
Michael Prokop <mika@grml.org>
Michal Minar <miminar@redhat.com>
Michal Minář <miminar@redhat.com>
Mike Brown <brownwm@us.ibm.com>
Miquel Sabaté <msabate@suse.com>
Misty Stanley-Jones <misty@apache.org>
Misty Stanley-Jones <misty@docker.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
Nathan Sullivan <nathan@nightsys.net>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nikita Tarasov <nikita@mygento.ru>
Noah Treuhaft <noah.treuhaft@docker.com>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Oilbeater <liumengxinfly@gmail.com>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
Omer Cohen <git@omer.io>
Patrick Devine <patrick.devine@docker.com>
Phil Estes <estesp@linux.vnet.ibm.com>
Philip Misiowiec <philip@atlashealth.com>
Pierre-Yves Ritschard <pyr@spootnik.org>
Qiao Anran <qiaoanran@gmail.com>
Randy Barlow <randy@electronsweatshop.com>
Richard Scothern <richard.scothern@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Rusty Conover <rusty@luckydinosaur.com>
Sean Boran <Boran@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastien Coavoux <s.coavoux@free.fr>
Serge Dubrouski <sergeyfd@gmail.com>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
spacexnice <yaoyao.xyy@alibaba-inc.com>
Spencer Rinehart <anubis@overthemonkey.com>
Stan Hu <stanhu@gmail.com>
Stefan Majewsky <stefan.majewsky@sap.com>
Stefan Weil <sw@weilnetz.de>
Stephen J Day <stephen.day@docker.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Ted Reed <ted.reed@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Tony Holdstock-Brown <tony@docker.com>
Trevor Pounds <trevor.pounds@gmail.com>
Troels Thomsen <troels@thomsen.io>
Victor Vieux <vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
yuexiao-wang <wang.yuexiao@zte.com.cn>
yuzou <zouyu7@huawei.com>
zhouhaibing089 <zhouhaibing089@gmail.com>
姜继忠 <jizhong.jiangjz@alibaba-inc.com>


@@ -1,117 +0,0 @@
# Building the registry source
## Use-case
This is useful if you intend to actively work on the registry.
### Alternatives
Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
### Gotchas
You are expected to know your way around Go and git.
If you are a casual user with no development experience and no prior knowledge of Go, building from source is probably not a good solution for you.
## Build the development environment
The first prerequisite for properly building distribution targets is a Go
development environment. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
to set one up. If done correctly, you should have a GOROOT and GOPATH set in the
environment.
Once a Go development environment is set up, one can use `go get` to install the
`registry` command from the current latest:
go get github.com/docker/distribution/cmd/registry
The above will install the source repository into the `GOPATH`.
Now create the directory for the registry data (this might require you to set permissions properly)
mkdir -p /var/lib/registry
... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data in another location.
The `registry`
binary can then be run with the following:
$ $GOPATH/bin/registry --version
$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
> __NOTE:__ While you do not need to use `go get` to check out the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/docker/distribution`.
The registry can be run with the default config using the following
incantation:
$ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] debug server listening localhost:5001
If it is working, one should see the above log messages.
### Repeatable Builds
For the full development experience, one should `cd` into
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
commands, such as `go test`, should work per package (please see
[Developing](#developing) if they don't work).
A `Makefile` has been provided as a convenience to support repeatable builds.
Please install the following into `GOPATH` for it to work:
go get github.com/golang/lint/golint
Once these commands are available in the `GOPATH`, run `make` to get a full
build:
$ make
+ clean
+ fmt
+ vet
+ lint
+ build
github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
github.com/sirupsen/logrus
github.com/docker/libtrust
...
github.com/yvasiyarov/gorelic
github.com/docker/distribution/registry/handlers
github.com/docker/distribution/cmd/registry
+ test
...
ok github.com/docker/distribution/digest 7.875s
ok github.com/docker/distribution/manifest 0.028s
ok github.com/docker/distribution/notifications 17.322s
? github.com/docker/distribution/registry [no test files]
ok github.com/docker/distribution/registry/api/v2 0.101s
? github.com/docker/distribution/registry/auth [no test files]
ok github.com/docker/distribution/registry/auth/silly 0.011s
...
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
+ binaries
The above provides a repeatable build using the contents of the vendor
directory. This includes formatting, vetting, linting, building,
testing and generating tagged binaries. We can verify this worked by running
the registry binary generated in the "./bin" directory:
$ ./bin/registry -version
./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
### Optional build tags
Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
the environment variable `DOCKER_BUILDTAGS`.
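For example, a file guarded by one of these tags begins with a build constraint
comment. A minimal sketch (the package name is illustrative; `include_oss` is one
of the tags this project's Dockerfile sets):

```
// +build include_oss

// Package oss is compiled into the registry only when the build is run
// with DOCKER_BUILDTAGS containing "include_oss", e.g.:
//
//	DOCKER_BUILDTAGS=include_oss make build
package oss
```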

View file

@ -1,108 +0,0 @@
# Changelog
## 2.6.0 (2017-01-18)
#### Storage
- S3: fixed bug in delete due to read-after-write inconsistency
- S3: allow EC2 IAM roles to be used when authorizing region endpoints
- S3: add Object ACL Support
- S3: fix delete method's notion of subpaths
- S3: use multipart upload API in `Move` method for performance
- S3: add v2 signature signing for legacy S3 clones
- Swift: add simple heuristic to detect incomplete DLOs during read ops
- Swift: support different user and tenant domains
- Swift: bulk deletes in chunks
- Aliyun OSS: fix delete method's notion of subpaths
- Aliyun OSS: optimize data copy after upload finishes
- Azure: close leaking response body
- Fix storage drivers dropping non-EOF errors when listing repositories
- Compare path properly when listing repositories in catalog
- Add a foreign layer URL host whitelist
- Improve catalog enumerate runtime
#### Registry
- Export `storage.CreateOptions` in top-level package
- Enable notifications to endpoints that use self-signed certificates
- Properly validate multi-URL foreign layers
- Add control over validation of URLs in pushed manifests
- Proxy mode: fix socket leak when pull is cancelled
- Tag service: properly handle error responses on HEAD request
- Support for custom authentication URL in proxying registry
- Add configuration option to disable access logging
- Add notification filtering by target media type
- Manifest: `References()` returns all children
- Honor `X-Forwarded-Port` and Forwarded headers
- Reference: Preserve tag and digest in With* functions
- Add policy configuration for enforcing repository classes
#### Client
- Changes the client Tags `All()` method to follow links
- Allow registry clients to connect via HTTP2
- Better handling of OAuth errors in client
#### Spec
- Manifest: clarify relationship between urls and foreign layers
- Authorization: add support for repository classes
#### Manifest
- Override media type returned from `Stat()` for existing manifests
- Add plugin mediatype to distribution manifest
#### Docs
- Document `TOOMANYREQUESTS` error code
- Document required Let's Encrypt port
- Improve documentation around implementation of OAuth2
- Improve documentation for configuration
#### Auth
- Add support for registry type in scope
- Add support for using v2 ping challenges for v1
- Add leeway to JWT `nbf` and `exp` checking
- htpasswd: dynamically parse htpasswd file
- Fix missing auth headers with PATCH HTTP request when pushing to default port
#### Dockerfile
- Update to go1.7
- Reorder Dockerfile steps for better layer caching
#### Notes
Documentation has moved to the documentation repository at
`github.com/docker/docker.github.io/tree/master/registry`
The registry is Go 1.7 compliant, and passes the newer, more restrictive `lint` and `vet` checks.
## 2.5.0 (2016-06-14)
#### Storage
- Ensure uploads directory is cleaned after upload is committed
- Add ability to cap concurrent operations in filesystem driver
- S3: Add 'us-gov-west-1' to the valid region list
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
- Add redirect middleware
#### Registry
- Add support for blobAccessController middleware
- Add support for layers from foreign sources
- Remove signature store
- Add support for Let's Encrypt
- Correct yaml key names in configuration
#### Client
- Add option to get content digest from manifest get
#### Spec
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
- Clarify API documentation around catalog fetch behavior
#### API
- Support returning HTTP 429 (Too Many Requests)
#### Documentation
- Update auth documentation examples to show "expires in" as int
#### Docker Image
- Use Alpine Linux as base image

View file

@ -1,148 +0,0 @@
# Contributing to the registry
## Before reporting an issue...
### If your problem is with...
- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue
Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
### If you...
- need help setting up your registry
- can't figure out something
- are not sure what's going on or what your problem is
Then please do not open an issue here yet - you should first try one of the following support forums:
- irc: #docker-distribution on freenode
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
### Reporting security issues
The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).
## Reporting an issue properly
By following these simple rules you will get better and faster feedback on your issue.
- search the bugtracker for an already reported issue
### If you found an issue that describes your problem:
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
### If you have not found an existing issue that describes your problem:
1. create a new issue, with a succinct title that describes your issue:
- bad title: "It doesn't work with my docker"
- good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
- `docker version`
- `docker info`
- `docker exec <registry-container> registry -version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
## Contributing a patch for a known bug, or a small correction
You should follow the basic GitHub workflow:
1. fork
2. commit a change
3. make sure the tests pass
4. PR
Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`
Some simple rules to ensure quick merge:
- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits
## Contributing new features
You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
Then you should submit your implementation, clearly linking to the issue (and possible proposal).
Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
It's mandatory to:
- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code
Complying with these simple rules will greatly accelerate the review process and ensure you have a pleasant experience contributing code to the Registry.
Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
## Coding Style
Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.
The rules:
1. All code should be formatted with `gofmt -s` (see the sketch after this list).
2. All code should pass the default levels of
[`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
Go](http://golang.org/doc/effective_go.html) and [Go Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
expectations, caveats and anything else that may be important. If a type
gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
`noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
In practice, short methods will have short variable names and globals will
have longer names.
7. No underscores in package names. If you need a compound name, step back,
and re-examine why you need a compound name. If you still think you need a
compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
warrant its own package, it has not been written generally enough to be a
part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
required. No, we don't need another unit testing framework. Assertion
packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
guidelines. Since you've read all the rules, you now know that.
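As an illustration of rule 1, `gofmt -s` applies simplifications such as dropping
redundant element types from composite literals and unused range variables. A
sketch, not code from this repository:

```
package main

type point struct{ x, y int }

func main() {
	// Before gofmt -s:
	//   ps := []point{point{1, 2}, point{3, 4}}
	//   for _ = range ps { ... }
	// After gofmt -s:
	ps := []point{{1, 2}, {3, 4}}
	for range ps {
		// ...
	}
}
```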
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.

View file

@ -1,21 +0,0 @@
FROM golang:1.8-alpine
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV DOCKER_BUILDTAGS include_oss include_gcs
ARG GOOS=linux
ARG GOARCH=amd64
RUN set -ex \
&& apk add --no-cache make git
WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
RUN make PREFIX=/go clean binaries
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]

View file

@ -1,58 +0,0 @@
# Distribution maintainers file
#
# This file describes who runs the docker/distribution project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"aaronlehmann",
"dmcgowan",
"dmp42",
"richardscothern",
"shykes",
"stevvooe",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.aaronlehmann]
Name = "Aaron Lehmann"
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"
[people.dmp42]
Name = "Olivier Gambier"
Email = "olivier@docker.com"
GitHub = "dmp42"
[people.richardscothern]
Name = "Richard Scothern"
Email = "richard.scothern@gmail.com"
GitHub = "richardscothern"
[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
GitHub = "shykes"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"

View file

@ -1,99 +0,0 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)
# Used to populate version variable in main package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
# Allow turning off function inlining and variable registerization
ifeq (${DISABLE_OPTIMIZATION},true)
GO_GCFLAGS=-gcflags "-N -l"
VERSION:="$(VERSION)-noopt"
endif
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet
.DEFAULT: all
all: fmt vet lint build test binaries
AUTHORS: .mailmap .git/HEAD
git log --format='%aN <%aE>' | sort -fu > $@
# This only needs to be generated by hand when cutting full releases.
version/version.go:
./version/version.sh > $@
# Required for go 1.5 to build
GO15VENDOREXPERIMENT := 1
# Go files
GOFILES=$(shell find . -type f -name '*.go')
# Package list
PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
# Resolving binary dependencies for specific targets
GOLINT=$(shell which golint || echo '')
VNDR=$(shell which vndr || echo '')
${PREFIX}/bin/registry: $(GOFILES)
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
${PREFIX}/bin/digest: $(GOFILES)
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES)
@echo "+ $@"
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
./bin/registry-api-descriptor-template $< > $@
vet:
@echo "+ $@"
@go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS)
fmt:
@echo "+ $@"
@test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
(echo >&2 "+ please format Go code with 'gofmt -s'" && false)
lint:
@echo "+ $@"
$(if $(GOLINT), , \
$(error Please install golint: `go get -u github.com/golang/lint/golint`))
@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"
build:
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
test:
@echo "+ $@"
@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)
test-full:
@echo "+ $@"
@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)
binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
@echo "+ $@"
clean:
@echo "+ $@"
@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"
dep-validate:
@echo "+ $@"
$(if $(VNDR), , \
$(error Please install vndr: go get github.com/lk4d4/vndr))
@rm -Rf .vendor.bak
@mv vendor .vendor.bak
@$(VNDR)
@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
(echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false)
@rm -Rf vendor
@mv .vendor.bak vendor

View file

@ -1,130 +0,0 @@
# Distribution
The Docker toolset to pack, ship, store, and deliver content.
This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused around security and performance.
<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
This repository contains the following components:
|**Component** |Description |
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
### How does this integrate with Docker engine?
This project should provide an implementation of the V2 API for use in the [Docker
core project](https://github.com/docker/docker). The API should be embeddable
and simplify the process of securely pulling and pushing content from `docker`
daemons.
### What are the long term goals of the Distribution project?
The _Distribution_ project has the further long term goal of providing a
secure tool chain for distributing content. The specifications, APIs and tools
should be as useful with Docker as they are without.
Our goal is to design a professional grade and extensible content distribution
system that allows users to:
* Enjoy an efficient, secured and reliable way to store, manage, package and
exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solution through good specs and a solid
extension mechanism.
## More about Registry 2.0
The new registry implementation provides the following benefits:
- faster push and pull
- new, more efficient implementation
- simplified deployment
- pluggable storage backend
- webhook notifications
For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
### Who needs to deploy a registry?
By default, Docker users pull images from Docker's public registry instance.
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
ability. Users can also push images to a repository on Docker's public registry,
if they have a [Docker Hub](https://hub.docker.com/) account.
For some users and even companies, this default behavior is sufficient. For
others, it is not.
For example, users with their own software products may want to maintain a
registry for private, company images. Also, you may wish to deploy your own
image repository for images used in testing or continuous integration. For these
use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
may be the better choice.
### Migration to Registry 2.0
For those who have previously deployed their own registry based on the Registry
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
data migration is required. A tool to assist with migration efforts has been
created. For more information see [docker/migrator](https://github.com/docker/migrator).
## Contribute
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project. If you are contributing code, see
the instructions for [building a development environment](BUILDING.md).
## Support
If any issues are encountered while using the _Distribution_ project, several
avenues are available for support:
<table>
<tr>
<th align="left">
IRC
</th>
<td>
#docker-distribution on FreeNode
</td>
</tr>
<tr>
<th align="left">
Issue Tracker
</th>
<td>
github.com/docker/distribution/issues
</td>
</tr>
<tr>
<th align="left">
Google Groups
</th>
<td>
https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
</td>
</tr>
<tr>
<th align="left">
Mailing List
</th>
<td>
docker@dockerproject.org
</td>
</tr>
</table>
## License
This project is distributed under [Apache License, Version 2.0](LICENSE).

View file

@ -1,44 +0,0 @@
## Registry Release Checklist
10. Compile release notes detailing features and bugfixes since the last release.
Update the `CHANGELOG.md` file and create a PR to master with the updates.
Once that PR has been approved by maintainers the change may be cherry-picked
to the release branch (new release branches may be forked from this commit).
20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`
30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.
```
make AUTHORS
```
40. Create a signed tag.
Distribution uses semantic versioning. Tags are of the format
`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added
to your Github account. The comment for the tag should include the release
notes, use previous tags as a guide for formatting consistently. Run
`git tag -s vx.y.z[-rcn]` to create the tag and `git tag -v vx.y.z[-rcn]` to verify it,
checking the comment and the commit hash.
50. Push the signed tag
60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.
70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.
80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary.
e.g. to release `2.3.1`:

- `2.3.1` (new)
- `2.3.0 -> 2.3.0` can be removed
- `2 -> 2.3.1`
- `2.3 -> 2.3.1`
90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.

View file

@ -1,267 +0,0 @@
# Roadmap
The Distribution Project consists of several components, some of which are
still being defined. This document defines the high-level goals of the
project, identifies the current components, and defines the release
relationship to the Docker Platform.
* [Distribution Goals](#distribution-goals)
* [Distribution Components](#distribution-components)
* [Project Planning](#project-planning): release relationship to the Docker Platform.
This road map is a living document, providing an overview of the goals and
considerations made in respect of the future of the project.
## Distribution Goals
- Replace the existing [docker registry](https://github.com/docker/docker-registry)
implementation as the primary implementation.
- Replace the existing push and pull code in the docker engine with the
distribution package.
- Define a strong data model for distributing docker images
- Provide a flexible distribution tool kit for use in the docker platform
- Unlock new distribution models
## Distribution Components
Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
features and bugfixes for a component will be added to the relevant milestone. If a feature or
bugfix is not part of a milestone, it is currently unscheduled for
implementation.
* [Registry](#registry)
* [Distribution Package](#distribution-package)
***
### Registry
The new Docker registry is the main portion of the distribution repository.
Registry 2.0 is the first release of the next-generation registry. This was
primarily focused on implementing the [new registry
API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
with a focus on security and performance.
Following from the Distribution project goals above, we have a set of goals
for registry v2 that we would like to follow in the design. New features
should be compared against these goals.
#### Data Storage and Distribution First
The registry's first goal is to provide a reliable, consistent storage
location for Docker images. The registry should only provide the minimal
amount of indexing required to fetch image data and no more.
This means we should be selective in new features and API additions, including
those that may require expensive, ever-growing indexes. Requests should be
servable in "constant time".
#### Content Addressability
All data objects used in the registry API should be content addressable.
Content identifiers should be secure and verifiable. This provides a secure,
reliable base from which to build more advanced content distribution systems.
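As a minimal sketch of the idea, using only the Go standard library (not code
from this repository), a content address is derived from the bytes themselves
and can be re-verified by any party holding the data:

```
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	blob := []byte("example layer data")

	// The identifier is a pure function of the content.
	addr := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))

	// Any holder of the bytes can recompute and verify the name.
	ok := addr == fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
	fmt.Println(addr, ok)
}
```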
#### Content Agnostic
In the past, changes to the image format would require large changes in Docker
and the Registry. By decoupling the distribution and image format, we can
allow the formats to progress without having to coordinate between the two.
This means that we should be focused on decoupling Docker from the registry
just as much as decoupling the registry from Docker. Such an approach will
allow us to unlock new distribution models that haven't been possible before.
We can take this further by saying that the new registry should be content
agnostic. The registry provides a model of names, tags, manifests and content
addresses and that model can be used to work with content.
#### Simplicity
The new registry should be closer to a microservice component than its
predecessor. This means it should have a narrower API and a low number of
service dependencies. It should be easy to deploy.
This means that other solutions should be explored before changing the API or
adding extra dependencies. If functionality is required, can it be added as an
extension or companion service?
#### Extensibility
The registry should provide extension points to add functionality, keeping the
core scope narrow while still making it possible to add new capabilities.
Features like search, indexing, synchronization and registry explorers fall
into this category. No such feature should be added unless we've found it
impossible to do through an extension.
#### Active Feature Discussions
The following are feature discussions that are currently active.
If you don't see your favorite, unimplemented feature, feel free to contact us
via IRC or the mailing list and we can talk about adding it. The goal here is
to make sure that new features go through a rigid design process before
landing in the registry.
##### Proxying to other Registries
A _pull-through caching_ mode exists for the registry, but is restricted from
within the docker client to only mirror the official Docker Hub. This functionality
can be expanded when image provenance has been specified and implemented in the
distribution project.
##### Metadata storage
Metadata for the registry is currently stored with the manifest and layer data on
the storage backend. While this is a big win for simplicity and reliably maintaining
state, it comes at the cost of consistency and high latency. The mutable registry
metadata operations should be abstracted behind an API which will allow ACID compliant
storage systems to handle metadata.
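A hypothetical sketch of such an abstraction (the interface and method names
below are invented for illustration and are not part of the codebase):

```
package metadata

import (
	"context"

	digest "github.com/opencontainers/go-digest"
)

// TagMetadataStore is a hypothetical interface behind which an
// ACID-compliant storage system could manage mutable tag metadata.
type TagMetadataStore interface {
	// GetTag resolves a tag to the digest of the manifest it points at.
	GetTag(ctx context.Context, repo, tag string) (digest.Digest, error)
	// PutTag points a tag at a manifest digest.
	PutTag(ctx context.Context, repo, tag string, dgst digest.Digest) error
}
```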
##### Peer to Peer transfer
Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
##### Indexing, Search and Discovery
The original registry provided some implementation of search for use with
private registries. Support has been elided from V2 since we'd like to
decouple search functionality from the registry. This makes the registry
simpler to deploy, especially in use cases where search is not needed, and
lets us decouple the image format from the registry.
There are explorations into using the catalog API and notification system to
build external indexes. The current line of thought is that we will define a
common search API to index and query docker images. Such a system could be run
as a companion to a registry or set of registries to power discovery.
The main issue with search and discovery is that there are so many ways to
accomplish it. There are two aspects to this project. The first is deciding on
how it will be done, including an API definition that can work with changing
data formats. The second is the process of integrating with `docker search`.
We expect that someone will attempt to address the problem with the existing tools
and propose it as a standard search API, or use it to inform a standardization
process. Once this has been explored, we will integrate with the docker client.
Please see the following for more detail:
- https://github.com/docker/distribution/issues/206
##### Deletes
> __NOTE:__ Deletes are a much asked for feature. Before requesting this
feature or participating in discussion, we ask that you read this section in
full and understand the problems behind deletes.
While, at first glance, implementing deletes seems simple, there are a number
of mitigating factors that make many solutions not ideal or even pathological in
the context of a registry. The following paragraphs discuss the background and
approaches that could be applied to arrive at a solution.
The goal of deletes in any system is to remove unused or unneeded data. Only
data requested for deletion should be removed and no other data. Removing
unintended data is worse than _not_ removing data that was requested for
removal but ideally, both are supported. Generally, according to this rule, we
err on holding data longer than needed, ensuring that it is only removed when
we can be certain that it can be removed. With the current behavior, we opt to
hold onto the data forever, ensuring that data cannot be incorrectly removed.
To understand the problems with implementing deletes, one must understand the
data model. All registry data is stored in a filesystem layout, implemented on
a "storage driver", effectively a _virtual file system_ (VFS). The storage
system must assume that this VFS layer will be eventually consistent and has
poor read-after-write consistency, since this is the lowest common denominator
among the storage drivers. This is mitigated by writing values in reverse-
dependent order, but makes wider transactional operations unsafe.
Layered on the VFS model is a content-addressable _directed, acyclic graph_
(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
Since the same data can be referenced by multiple manifests, we only store
data once, even if it is in different repositories. Thus, we have a set of
blobs, referenced by tags and manifests. If we want to delete a blob we need
to be certain that it is no longer referenced by another manifest or tag. When
we delete a manifest, we also can try to delete the referenced blobs. Deciding
whether or not a blob has an active reference is the crux of the problem.
Conceptually, deleting a manifest and its resources is quite simple. Just find
all the manifests, enumerate the referenced blobs and delete the blobs not in
that set. An astute observer will recognize this as a garbage collection
problem. As with garbage collection in programming languages, this is very
simple when one always has a consistent view. When one adds parallelism and an
inconsistent view of data, it becomes very challenging.
A simple example can demonstrate this. Let's say we are deleting a manifest
_A_ in one process. We scan the manifest and decide that all the blobs are
ready for deletion. Concurrently, we have another process accepting a new
manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
is accepted and all the blobs are considered present, so the operation
proceeds. The original process then deletes the referenced blobs, assuming
they were unreferenced. The manifest _B_, which we thought had all of its data
present, can no longer be served by the registry, since the dependent data has
been deleted.
Deleting data from the registry safely requires some way to coordinate this
operation. The following approaches are being considered:
- _Reference Counting_ - Maintain a count of references to each blob (see the
sketch after this list). This is challenging for a number of reasons: 1.
maintaining a consistent consensus of reference counts across a set of
Registries and 2. building the initial list of reference counts for an
existing registry. These challenges can be met with a consensus protocol like
Paxos or Raft in the first case and a necessary but simple scan in the
second.
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
and find all blob references. Delete all unreferenced blobs. This approach
is very simple but requires disabling writes for a period of time while the
service reads all data. This is slow and expensive but very accurate and
effective.
- _Generational GC_ - Do something similar to above but instead of blocking
writes, writes are sent to another storage backend while reads are broadcast
to the new and old backends. GC is then performed on the read-only portion.
Because writes land in the new backend, the data in the read-only section
can be safely deleted. The main drawbacks of this approach are complexity
and coordination.
- _Centralized Oracle_ - Using a centralized, transactional database, we can
know exactly which data is referenced at any given time. This avoids
the coordination problem by managing this data in a single location. We trade
off metadata scalability for simplicity and performance. This is a very good
option for most registry deployments. This would create a bottleneck for
registry metadata. However, metadata is generally not the main bottleneck
when serving images.
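As an illustration of the reference-counting option above, the core bookkeeping
might look like the following in-memory sketch (invented for illustration; a
real implementation would still need the consensus and initial-scan machinery
described in that item):

```
package gc

import (
	"sync"

	digest "github.com/opencontainers/go-digest"
)

// refCounter tracks how many manifests or tags reference each blob.
// A blob is only a deletion candidate once its count drops to zero.
type refCounter struct {
	mu     sync.Mutex
	counts map[digest.Digest]int
}

func newRefCounter() *refCounter {
	return &refCounter{counts: map[digest.Digest]int{}}
}

// Ref records one more reference to the blob identified by d.
func (r *refCounter) Ref(d digest.Digest) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[d]++
}

// Unref drops one reference and reports whether the blob became
// unreferenced, i.e. safe to consider for deletion.
func (r *refCounter) Unref(d digest.Digest) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[d]--
	if r.counts[d] <= 0 {
		delete(r.counts, d)
		return true
	}
	return false
}
```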
Please let us know if other solutions exist that we have yet to enumerate.
Note that for any approach, implementation is a massive consideration. For
example, a mark-sweep based solution may seem simple, but the amount of work in
coordination may offset the extra work it might take to build a _Centralized
Oracle_. We'll accept proposals for any solution but please coordinate with us
before dropping code.
At this time, we have traded off simplicity and ease of deployment for disk
space. Simplicity and ease of deployment tend to reduce developer involvement,
which is currently the most expensive resource in software engineering. Taking
on any solution for deletes will greatly affect these factors, trading off
very cheap disk space for a complex deployment and operational story.
Please see the following issues for more detail:
- https://github.com/docker/distribution/issues/422
- https://github.com/docker/distribution/issues/461
- https://github.com/docker/distribution/issues/462
### Distribution Package
At its core, the Distribution Project is a set of Go packages that make up
Distribution Components. At this time, most of these packages make up the
Registry implementation.
The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
For feature additions, please see the Registry section. In the future, we may break out a
separate Roadmap for distribution-specific features that apply to more than
just the registry.
***
### Project Planning
An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.

View file

@ -1,257 +0,0 @@
package distribution
import (
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
)
var (
// ErrBlobExists returned when blob already exists
ErrBlobExists = errors.New("blob exists")
// ErrBlobDigestUnsupported when blob digest is an unsupported version.
ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
// ErrBlobUnknown when blob is not found.
ErrBlobUnknown = errors.New("unknown blob")
// ErrBlobUploadUnknown returned when upload is not found.
ErrBlobUploadUnknown = errors.New("blob upload unknown")
// ErrBlobInvalidLength returned when the blob has an expected length on
// commit, meaning mismatched with the descriptor or an invalid value.
ErrBlobInvalidLength = errors.New("blob invalid length")
)
// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
Digest digest.Digest
Reason error
}
func (err ErrBlobInvalidDigest) Error() string {
return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
err.Digest, err.Reason)
}
// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
From reference.Canonical
Descriptor Descriptor
}
func (err ErrBlobMounted) Error() string {
return fmt.Sprintf("blob mounted from: %v to: %v",
err.From, err.Descriptor)
}
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
// MediaType describes the type of the content. All text-based formats are
// encoded as utf-8.
MediaType string `json:"mediaType,omitempty"`
// Size in bytes of content.
Size int64 `json:"size,omitempty"`
// Digest uniquely identifies the content. A byte stream can be verified
// against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
}
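// An illustrative wire representation of a Descriptor (field values
// invented for the example, not taken from the original source):
//
//	{
//	  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
//	  "size": 7143,
//	  "digest": "sha256:..."
//	}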
// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
return d
}
// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}
// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
Delete(ctx context.Context, dgst digest.Digest) error
}
// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}
// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
BlobStatter
// SetDescriptor assigns the descriptor to the digest. The provided digest and
// the digest in the descriptor must map to identical content but they may
// differ on their algorithm. The descriptor must have the canonical
// digest of the content and the digest algorithm must match the
// annotator's canonical algorithm.
//
// Such a facility can be used to map blobs between digest domains, with
// the restriction that the algorithm of the descriptor must match the
// canonical algorithm (ie sha256) of the annotator.
SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
// Clear enables descriptors to be unlinked
Clear(ctx context.Context, dgst digest.Digest) error
}
// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
// Open provides a ReadSeekCloser to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error will be
// returned.
Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}
// BlobServer can serve blobs via http.
type BlobServer interface {
// ServeBlob attempts to serve the blob, identified by dgst, via http. The
// service may decide to redirect the client elsewhere or serve the data
// directly.
//
// This handler only issues successful responses, such as 2xx or 3xx,
// meaning it serves data or issues a redirect. If the blob is not
// available, an error will be returned and the caller may still issue a
// response.
//
// The implementation may serve the same blob from a different digest
// domain. The appropriate headers will be set for the blob, unless they
// have already been set by the caller.
ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}
// BlobIngester ingests blob data.
type BlobIngester interface {
// Put inserts the content p into the blob service, returning a descriptor
// or an error.
Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
}
// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
Apply(interface{}) error
}
// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
// Stat allows a precalculated descriptor to be passed in to link and return.
// Blob access check will be skipped if set.
Stat *Descriptor
}
}
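// A BlobCreateOption implementation typically asserts its argument to
// *CreateOptions and mutates it. Illustrative sketch only (the names
// below are invented and not part of the original source):
//
//	type optionFunc func(interface{}) error
//
//	func (f optionFunc) Apply(v interface{}) error { return f(v) }
//
//	func withMountFrom(ref reference.Canonical) BlobCreateOption {
//	    return optionFunc(func(v interface{}) error {
//	        opts, ok := v.(*CreateOptions)
//	        if !ok {
//	            return fmt.Errorf("unexpected options type: %T", v)
//	        }
//	        opts.Mount.ShouldMount = true
//	        opts.Mount.From = ref
//	        return nil
//	    })
//	}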
// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
io.WriteCloser
io.ReaderFrom
// Size returns the number of bytes written to this blob.
Size() int64
// ID returns the identifier for this writer. The ID can be used with the
// Blob service to later resume the write.
ID() string
// StartedAt returns the time this blob write was started.
StartedAt() time.Time
// Commit completes the blob writer process. The content is verified
// against the provided provisional descriptor, which may result in an
// error. Depending on the implementation, written data may be validated
// against the provisional descriptor fields. If MediaType is not present,
// the implementation may reject the commit or assign "application/octet-
// stream" to the blob. The returned descriptor may have a different
// digest depending on the blob store, referred to as the canonical
// descriptor.
Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
// Cancel ends the blob write without storing any data and frees any
// associated resources. Any data written thus far will be lost. Cancel
// implementations should allow multiple calls, even after a commit, that
// result in a no-op. This allows use of Cancel in a defer statement,
// increasing the assurance that it is correctly called.
Cancel(ctx context.Context) error
}
// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
BlobStatter
BlobProvider
BlobIngester
}
// BlobStore represent the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
BlobService
BlobServer
BlobDeleter
}

View file

@ -1,94 +0,0 @@
# Pony-up!
machine:
pre:
# Install gvm
- bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
# Install codecov for coverage
- pip install --user codecov
post:
# go
- gvm install go1.8 --prefer-binary --name=stable
environment:
# Convenient shortcuts to "common" locations
CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
# Trick circle brainflat "no absolute path" behavior
BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
DOCKER_BUILDTAGS: "include_oss include_gcs"
# Workaround Circle parsing dumb bugs and/or YAML wonkyness
CIRCLE_PAIN: "mode: set"
hosts:
# Not used yet
fancy: 127.0.0.1
dependencies:
pre:
# Copy the code to the gopath of all go versions
- >
gvm use stable &&
mkdir -p "$(dirname $BASE_STABLE)" &&
cp -R "$CHECKOUT" "$BASE_STABLE"
override:
# Install dependencies for every copied clone/go version
- gvm use stable && go get github.com/lk4d4/vndr:
pwd: $BASE_STABLE
post:
# For the stable go version, additionally install linting tools
- >
gvm use stable &&
go get github.com/axw/gocov/gocov github.com/golang/lint/golint
test:
pre:
# Output the go versions we are going to test
# - gvm use old && go version
- gvm use stable && go version
# Ensure validation of dependencies
- git fetch origin:
pwd: $BASE_STABLE
- gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi:
pwd: $BASE_STABLE
# First thing: build everything. This will catch compile errors, and it's
# also necessary for go vet to work properly (see #807).
- gvm use stable && go install $(go list ./... | grep -v "/vendor/"):
pwd: $BASE_STABLE
# FMT
- gvm use stable && make fmt:
pwd: $BASE_STABLE
# VET
- gvm use stable && make vet:
pwd: $BASE_STABLE
# LINT
- gvm use stable && make lint:
pwd: $BASE_STABLE
override:
# Test stable, and report
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
timeout: 1000
pwd: $BASE_STABLE
# Test stable with race
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE':
timeout: 1000
pwd: $BASE_STABLE
post:
# Report to codecov
- bash <(curl -s https://codecov.io/bash):
pwd: $BASE_STABLE
## Notes
# Do we want these as well?
# - go get code.google.com/p/go.tools/cmd/goimports
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
# http://labix.org/gocheck

View file

@ -1,7 +0,0 @@
#!/usr/bin/env bash
# Given a subpackage and the containing package, figures out which packages
# need to be passed to `go test -coverpkg`: this includes all of the
# subpackage's dependencies within the containing package, as well as the
# subpackage itself.
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)"
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','

View file

@ -1,247 +0,0 @@
package digestset
import (
"errors"
"sort"
"strings"
"sync"
digest "github.com/opencontainers/go-digest"
)
var (
// ErrDigestNotFound is used when a matching digest
// could not be found in a set.
ErrDigestNotFound = errors.New("digest not found")
// ErrDigestAmbiguous is used when multiple digests
// are found in a set. None of the matching digests
// should be considered valid matches.
ErrDigestAmbiguous = errors.New("ambiguous digest string")
)
// Set is used to hold a unique set of digests which
// may be easily referenced by a string representation of the
// digest as well as a short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
mutex sync.RWMutex
entries digestEntries
}
// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
return &Set{
entries: digestEntries{},
}
}
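// Illustrative usage from a client package (not part of the original
// source):
//
//	s := digestset.NewSet()
//	_ = s.Add(dgst)               // index a full digest
//	full, err := s.Lookup("ab12") // resolve by unique short prefix; err may
//	                              // be ErrDigestNotFound or ErrDigestAmbiguous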
// checkShortMatch checks whether two digests match as either whole
// values or short values. This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
if len(hex) == len(shortHex) {
if hex != shortHex {
return false
}
if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
} else if !strings.HasPrefix(hex, shortHex) {
return false
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
return false
}
return true
}
// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
if len(dst.entries) == 0 {
return "", ErrDigestNotFound
}
var (
searchFunc func(int) bool
alg digest.Algorithm
hex string
)
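// The input may be a full digest or a shortened form. If it does not
// parse as a well-formed digest, fall back to treating it as a raw hex
// prefix and search the sorted entries by value.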
dgst, err := digest.Parse(d)
if err == digest.ErrDigestInvalidFormat {
hex = d
searchFunc = func(i int) bool {
return dst.entries[i].val >= d
}
} else {
hex = dgst.Hex()
alg = dgst.Algorithm()
searchFunc = func(i int) bool {
if dst.entries[i].val == hex {
return dst.entries[i].alg >= alg
}
return dst.entries[i].val >= hex
}
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
return "", ErrDigestNotFound
}
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
return dst.entries[idx].digest, nil
}
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
return "", ErrDigestAmbiguous
}
return dst.entries[idx].digest, nil
}
// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
if idx == len(dst.entries) {
dst.entries = append(dst.entries, entry)
return nil
} else if dst.entries[idx].digest == d {
return nil
}
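// Insert the new entry at its sorted position idx, shifting any later
// entries right by one.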
entries := append(dst.entries, nil)
copy(entries[idx+1:], entries[idx:len(entries)-1])
entries[idx] = entry
dst.entries = entries
return nil
}
// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
if err := d.Validate(); err != nil {
return err
}
dst.mutex.Lock()
defer dst.mutex.Unlock()
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
searchFunc := func(i int) bool {
if dst.entries[i].val == entry.val {
return dst.entries[i].alg >= entry.alg
}
return dst.entries[i].val >= entry.val
}
idx := sort.Search(len(dst.entries), searchFunc)
// Not found if idx is past the end or the entry at idx is not this digest
if idx == len(dst.entries) || dst.entries[idx].digest != d {
return nil
}
entries := dst.entries
copy(entries[idx:], entries[idx+1:])
entries = entries[:len(entries)-1]
dst.entries = entries
return nil
}
// All returns all the digests in the set
func (dst *Set) All() []digest.Digest {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
retValues := make([]digest.Digest, len(dst.entries))
for i := range dst.entries {
retValues[i] = dst.entries[i].digest
}
return retValues
}
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
dst.mutex.RLock()
defer dst.mutex.RUnlock()
m := make(map[digest.Digest]string, len(dst.entries))
l := length
resetIdx := 0
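// For each entry, extend the candidate short-code length l until the
// prefix no longer matches any later entry. Since entries are sorted,
// only subsequent entries can collide; resetIdx marks the end of the
// colliding run so l can be reset to the requested length once past it.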
for i := 0; i < len(dst.entries); i++ {
var short string
extended := true
for extended {
extended = false
if len(dst.entries[i].val) <= l {
short = dst.entries[i].digest.String()
} else {
short = dst.entries[i].val[:l]
for j := i + 1; j < len(dst.entries); j++ {
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
if j > resetIdx {
resetIdx = j
}
extended = true
} else {
break
}
}
if extended {
l++
}
}
}
m[dst.entries[i].digest] = short
if i >= resetIdx {
l = length
}
}
return m
}
type digestEntry struct {
alg digest.Algorithm
val string
digest digest.Digest
}
type digestEntries []*digestEntry
func (d digestEntries) Len() int {
return len(d)
}
func (d digestEntries) Less(i, j int) bool {
if d[i].val != d[j].val {
return d[i].val < d[j].val
}
return d[i].alg < d[j].alg
}
func (d digestEntries) Swap(i, j int) {
d[i], d[j] = d[j], d[i]
}

View file

@ -1,7 +0,0 @@
// Package distribution will define the interfaces for the components of
// docker distribution. The goal is to allow users to reliably package, ship
// and store content related to docker images.
//
// This is currently a work in progress. More details are available in the
// README.md.
package distribution


@@ -1,115 +0,0 @@
package distribution
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")
// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version.
var ErrManifestNotModified = errors.New("manifest not modified")
// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed
var ErrUnsupported = errors.New("operation unsupported")
// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
}
func (err ErrTagUnknown) Error() string {
return fmt.Sprintf("unknown tag=%s", err.Tag)
}
// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
Name string
}
func (err ErrRepositoryUnknown) Error() string {
return fmt.Sprintf("unknown repository name=%s", err.Name)
}
// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
Name string
Reason error
}
func (err ErrRepositoryNameInvalid) Error() string {
return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}
// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
Name string
Tag string
}
func (err ErrManifestUnknown) Error() string {
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}
// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
Name string
Revision digest.Digest
}
func (err ErrManifestUnknownRevision) Error() string {
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}
func (ErrManifestUnverified) Error() string {
return "unverified manifest"
}
// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error
func (errs ErrManifestVerification) Error() string {
var parts []string
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
Digest digest.Digest
}
func (err ErrManifestBlobUnknown) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
Name string
Reason error
}
func (err ErrManifestNameInvalid) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
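Since these error types are plain values, callers typically branch on them with a type switch. A hedged sketch; the describeFetchError helper is invented for illustration:

package main

import (
    "fmt"

    "github.com/docker/distribution"
)

// describeFetchError maps the error types defined above to user-facing
// messages.
func describeFetchError(err error) string {
    switch e := err.(type) {
    case distribution.ErrRepositoryUnknown:
        return "no such repository: " + e.Name
    case distribution.ErrManifestUnknown:
        return fmt.Sprintf("no manifest %s:%s", e.Name, e.Tag)
    case distribution.ErrManifestUnknownRevision:
        return fmt.Sprintf("no manifest %s@%s", e.Name, e.Revision)
    }
    if err == distribution.ErrAccessDenied {
        return "access denied"
    }
    return err.Error()
}

func main() {
    fmt.Println(describeFetchError(distribution.ErrRepositoryUnknown{Name: "team/app"}))
}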


@@ -1,125 +0,0 @@
package distribution
import (
"fmt"
"mime"
"github.com/docker/distribution/context"
"github.com/opencontainers/go-digest"
)
// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
// References returns a list of objects which make up this manifest.
// A reference is anything which can be represented by a
// distribution.Descriptor. These can consist of layers, resources or other
// manifests.
//
// While no particular order is required, implementations should return
// them from highest to lowest priority. For example, one might want to
// return the base layer before the top layer.
References() []Descriptor
// Payload provides the serialized format of the manifest, in addition to
// the media type.
Payload() (mediaType string, payload []byte, err error)
}
// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
// Build creates the manifest from this builder.
Build(ctx context.Context) (Manifest, error)
// References returns a list of objects which have been added to this
// builder. The dependencies are returned in the order they were added,
// which should be from base to head.
References() []Descriptor
// AppendReference includes the given object in the manifest after any
// existing dependencies. If the add fails, such as when adding an
// unsupported dependency, an error may be returned.
//
// The destination of the reference is dependent on the manifest type and
// the dependency type.
AppendReference(dependency Describable) error
}
// ManifestService describes operations on image manifests.
type ManifestService interface {
// Exists returns true if the manifest exists.
Exists(ctx context.Context, dgst digest.Digest) (bool, error)
// Get retrieves the manifest specified by the given digest
Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
// Put creates or updates the given manifest returning the manifest digest
Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
// Delete removes the manifest specified by the given digest. Deleting
// a manifest that doesn't exist will return ErrManifestNotFound
Delete(ctx context.Context, dgst digest.Digest) error
}
// ManifestEnumerator enables iterating over manifests
type ManifestEnumerator interface {
// Enumerate calls ingester for each manifest.
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
// Describable is an interface for descriptors
type Describable interface {
Descriptor() Descriptor
}
// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
for t := range mappings {
if t != "" {
mediaTypes = append(mediaTypes, t)
}
}
return
}
// UnmarshalFunc implements manifest unmarshalling for a given MediaType
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
var mappings = make(map[string]UnmarshalFunc)
// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of
// the header. Strip semicolons and anything following them.
var mediaType string
if ctHeader != "" {
var err error
mediaType, _, err = mime.ParseMediaType(ctHeader)
if err != nil {
return nil, Descriptor{}, err
}
}
unmarshalFunc, ok := mappings[mediaType]
if !ok {
unmarshalFunc, ok = mappings[""]
if !ok {
return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
}
}
return unmarshalFunc(p)
}
// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
// should be called from the version-specific manifest packages.
func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
if _, ok := mappings[mediaType]; ok {
return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
}
mappings[mediaType] = u
return nil
}
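A hedged sketch of the registration flow described above: a version-specific package registers an UnmarshalFunc at init time, and UnmarshalManifest then routes a Content-Type header to it after mime.ParseMediaType strips any parameters. The media type and rawManifest type below are invented for illustration:

package main

import (
    "fmt"

    "github.com/docker/distribution"
)

// rawManifest is a trivial Manifest implementation for the sketch.
type rawManifest struct {
    mediaType string
    payload   []byte
}

func (m rawManifest) References() []distribution.Descriptor { return nil }

func (m rawManifest) Payload() (string, []byte, error) {
    return m.mediaType, m.payload, nil
}

func init() {
    // Version-specific manifest packages perform this registration in init.
    _ = distribution.RegisterManifestSchema(
        "application/vnd.example.manifest.v1+json",
        func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
            return rawManifest{"application/vnd.example.manifest.v1+json", b}, distribution.Descriptor{}, nil
        })
}

func main() {
    // Parameters after the semicolon are stripped before the lookup.
    m, _, err := distribution.UnmarshalManifest(
        "application/vnd.example.manifest.v1+json; charset=utf-8", []byte(`{}`))
    fmt.Println(m, err)
}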


@@ -1,42 +0,0 @@
package reference
import "path"
// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
if _, ok := ref.(NamedTagged); ok {
return false
}
if _, ok := ref.(Canonical); ok {
return false
}
return true
}
// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().Name()
}
return ref.Name()
}
// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().String()
}
return ref.String()
}
// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
matched, err := path.Match(pattern, FamiliarString(ref))
if namedRef, isNamed := ref.(Named); isNamed && !matched {
matched, _ = path.Match(pattern, FamiliarName(namedRef))
}
return matched, err
}
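A small usage sketch for these helpers, assuming ParseNormalizedNamed from the normalization file that follows in this diff:

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func main() {
    ref, err := reference.ParseNormalizedNamed("docker.io/library/redis:5")
    if err != nil {
        panic(err)
    }
    fmt.Println(reference.FamiliarName(ref))   // redis
    fmt.Println(reference.FamiliarString(ref)) // redis:5

    // FamiliarMatch applies a path.Match pattern to the familiar form.
    ok, _ := reference.FamiliarMatch("redis*", ref)
    fmt.Println(ok) // true
}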


@@ -1,170 +0,0 @@
package reference
import (
"errors"
"fmt"
"strings"
"github.com/docker/distribution/digestset"
"github.com/opencontainers/go-digest"
)
var (
legacyDefaultDomain = "index.docker.io"
defaultDomain = "docker.io"
officialRepoName = "library"
defaultTag = "latest"
)
// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
Named
Familiar() Named
}
// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier,
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
}
domain, remainder := splitDockerDomain(s)
var remoteName string
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
remoteName = remainder[:tagSep]
} else {
remoteName = remainder
}
if strings.ToLower(remoteName) != remoteName {
return nil, errors.New("invalid reference format: repository name must be lowercase")
}
ref, err := Parse(domain + "/" + remainder)
if err != nil {
return nil, err
}
named, isNamed := ref.(Named)
if !isNamed {
return nil, fmt.Errorf("reference %s has no name", ref.String())
}
return named, nil
}
// splitDockerDomain splits a repository name into domain and remote-name
// strings. If no valid domain is found, the default domain is used. The
// repository name must already have been validated.
func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
domain, remainder = defaultDomain, name
} else {
domain, remainder = name[:i], name[i+1:]
}
if domain == legacyDefaultDomain {
domain = defaultDomain
}
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder
}
return
}
// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// "docker.io" and the "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized, name-only reference.
func familiarizeName(named namedRepository) repository {
repo := repository{
domain: named.Domain(),
path: named.Path(),
}
if repo.domain == defaultDomain {
repo.domain = ""
// Handle official repositories which have the pattern "library/<official repo name>"
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
repo.path = split[1]
}
}
return repo
}
func (r reference) Familiar() Named {
return reference{
namedRepository: familiarizeName(r.namedRepository),
tag: r.tag,
digest: r.digest,
}
}
func (r repository) Familiar() Named {
return familiarizeName(r)
}
func (t taggedReference) Familiar() Named {
return taggedReference{
namedRepository: familiarizeName(t.namedRepository),
tag: t.tag,
}
}
func (c canonicalReference) Familiar() Named {
return canonicalReference{
namedRepository: familiarizeName(c.namedRepository),
digest: c.digest,
}
}
// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
if IsNameOnly(ref) {
namedTagged, err := WithTag(ref, defaultTag)
if err != nil {
// The default tag must always be valid; to create a NamedTagged
// from non-validated input, use the WithTag function instead,
// which returns an error rather than panicking.
panic(err)
}
return namedTagged
}
return ref
}
// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
return digestReference("sha256:" + ref), nil
}
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
return ParseNormalizedNamed(ref)
}
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
dgst, err := ds.Lookup(ref)
if err == nil {
return digestReference(dgst), nil
}
} else {
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
}
return ParseNormalizedNamed(ref)
}
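A minimal sketch of the normalization flow above, using only functions defined in this package:

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func main() {
    // A bare familiar name expands to the fully qualified docker.io form.
    named, err := reference.ParseNormalizedNamed("ubuntu")
    if err != nil {
        panic(err)
    }
    fmt.Println(named.String()) // docker.io/library/ubuntu

    // TagNameOnly fills in the default tag when none was given.
    fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/ubuntu:latest
}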


@@ -1,433 +0,0 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
//
// digest := digest-algorithm ":" digest-hex
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
// identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
const (
// NameTotalLengthMax is the maximum total number of characters in a repository name.
NameTotalLengthMax = 255
)
var (
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
ErrReferenceInvalidFormat = errors.New("invalid reference format")
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
ErrTagInvalidFormat = errors.New("invalid tag format")
// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
ErrDigestInvalidFormat = errors.New("invalid digest format")
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
// ErrNameEmpty is returned for empty, invalid repository names.
ErrNameEmpty = errors.New("repository name must have at least one component")
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
// ErrNameNotCanonical is returned when a name is not canonical.
ErrNameNotCanonical = errors.New("repository name must be canonical")
)
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
// String returns the full reference
String() string
}
// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
reference Reference
}
// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
return Field{reference}
}
// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
return f.reference
}
// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
return []byte(f.reference.String()), nil
}
// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
r, err := Parse(string(p))
if err != nil {
return err
}
f.reference = r
return nil
}
// Named is an object with a full name
type Named interface {
Reference
Name() string
}
// Tagged is an object which has a tag
type Tagged interface {
Reference
Tag() string
}
// NamedTagged is an object including a name and tag.
type NamedTagged interface {
Named
Tag() string
}
// Digested is an object which has a digest
// by which it can be referenced
type Digested interface {
Reference
Digest() digest.Digest
}
// Canonical reference is an object with a fully unique
// name including a name with domain and digest
type Canonical interface {
Named
Digest() digest.Digest
}
// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
Named
Domain() string
Path() string
}
// Domain returns the domain part of the Named reference
func Domain(named Named) string {
if r, ok := named.(namedRepository); ok {
return r.Domain()
}
domain, _ := splitDomain(named.Name())
return domain
}
// Path returns the name without the domain part of the Named reference
func Path(named Named) (name string) {
if r, ok := named.(namedRepository); ok {
return r.Path()
}
_, path := splitDomain(named.Name())
return path
}
func splitDomain(name string) (string, string) {
match := anchoredNameRegexp.FindStringSubmatch(name)
if len(match) != 3 {
return "", name
}
return match[1], match[2]
}
// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()
}
return splitDomain(named.Name())
}
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
matches := ReferenceRegexp.FindStringSubmatch(s)
if matches == nil {
if s == "" {
return nil, ErrNameEmpty
}
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
return nil, ErrNameContainsUppercase
}
return nil, ErrReferenceInvalidFormat
}
if len(matches[1]) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
var repo repository
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
if nameMatch != nil && len(nameMatch) == 3 {
repo.domain = nameMatch[1]
repo.path = nameMatch[2]
} else {
repo.domain = ""
repo.path = matches[1]
}
ref := reference{
namedRepository: repo,
tag: matches[2],
}
if matches[3] != "" {
var err error
ref.digest, err = digest.Parse(matches[3])
if err != nil {
return nil, err
}
}
r := getBestReferenceType(ref)
if r == nil {
return nil, ErrNameEmpty
}
return r, nil
}
// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
named, err := ParseNormalizedNamed(s)
if err != nil {
return nil, err
}
if named.String() != s {
return nil, ErrNameNotCanonical
}
return named, nil
}
// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
if len(name) > NameTotalLengthMax {
return nil, ErrNameTooLong
}
match := anchoredNameRegexp.FindStringSubmatch(name)
if match == nil || len(match) != 3 {
return nil, ErrReferenceInvalidFormat
}
return repository{
domain: match[1],
path: match[2],
}, nil
}
// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
if !anchoredTagRegexp.MatchString(tag) {
return nil, ErrTagInvalidFormat
}
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if canonical, ok := name.(Canonical); ok {
return reference{
namedRepository: repo,
tag: tag,
digest: canonical.Digest(),
}, nil
}
return taggedReference{
namedRepository: repo,
tag: tag,
}, nil
}
// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
if !anchoredDigestRegexp.MatchString(digest.String()) {
return nil, ErrDigestInvalidFormat
}
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if tagged, ok := name.(Tagged); ok {
return reference{
namedRepository: repo,
tag: tagged.Tag(),
digest: digest,
}, nil
}
return canonicalReference{
namedRepository: repo,
digest: digest,
}, nil
}
// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
domain, path := SplitHostname(ref)
return repository{
domain: domain,
path: path,
}
}
func getBestReferenceType(ref reference) Reference {
if ref.Name() == "" {
// Allow digest only references
if ref.digest != "" {
return digestReference(ref.digest)
}
return nil
}
if ref.tag == "" {
if ref.digest != "" {
return canonicalReference{
namedRepository: ref.namedRepository,
digest: ref.digest,
}
}
return ref.namedRepository
}
if ref.digest == "" {
return taggedReference{
namedRepository: ref.namedRepository,
tag: ref.tag,
}
}
return ref
}
type reference struct {
namedRepository
tag string
digest digest.Digest
}
func (r reference) String() string {
return r.Name() + ":" + r.tag + "@" + r.digest.String()
}
func (r reference) Tag() string {
return r.tag
}
func (r reference) Digest() digest.Digest {
return r.digest
}
type repository struct {
domain string
path string
}
func (r repository) String() string {
return r.Name()
}
func (r repository) Name() string {
if r.domain == "" {
return r.path
}
return r.domain + "/" + r.path
}
func (r repository) Domain() string {
return r.domain
}
func (r repository) Path() string {
return r.path
}
type digestReference digest.Digest
func (d digestReference) String() string {
return digest.Digest(d).String()
}
func (d digestReference) Digest() digest.Digest {
return digest.Digest(d)
}
type taggedReference struct {
namedRepository
tag string
}
func (t taggedReference) String() string {
return t.Name() + ":" + t.tag
}
func (t taggedReference) Tag() string {
return t.tag
}
type canonicalReference struct {
namedRepository
digest digest.Digest
}
func (c canonicalReference) String() string {
return c.Name() + "@" + c.digest.String()
}
func (c canonicalReference) Digest() digest.Digest {
return c.digest
}
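Putting the grammar above to work, a minimal sketch using only exported functions from this file; the registry host and repository name are invented:

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func main() {
    ref, err := reference.Parse("registry.example.com:5000/team/app:v1.2")
    if err != nil {
        panic(err)
    }
    if named, ok := ref.(reference.Named); ok {
        fmt.Println(reference.Domain(named)) // registry.example.com:5000
        fmt.Println(reference.Path(named))   // team/app
    }
    if tagged, ok := ref.(reference.Tagged); ok {
        fmt.Println(tagged.Tag()) // v1.2
    }
}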


@@ -1,143 +0,0 @@
package reference
import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allows one period, one or two underscores, and
// multiple dashes.
separatorRegexp = match(`(?:[._]|__|[-]*)`)
// nameComponentRegexp restricts registry path component names to start
// with at least one letter or number, with following parts able to be
// separated by one period, one or two underscores, and multiple dashes.
nameComponentRegexp = expression(
alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp)))
// domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp
// and followed by an optional port.
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = expression(
domainComponentRegexp,
optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`)))
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = match(`[\w][\w.-]{0,127}`)
// anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string.
anchoredTagRegexp = anchored(TagRegexp)
// DigestRegexp matches valid digests.
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
// anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string.
anchoredDigestRegexp = anchored(DigestRegexp)
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either.
NameRegexp = expression(
optional(DomainRegexp, literal(`/`)),
nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components.
anchoredNameRegexp = anchored(
optional(capture(DomainRegexp), literal(`/`)),
capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp)))
// IdentifierRegexp is the format for a string identifier used as a
// content-addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
// match compiles the string to a regular expression.
var match = regexp.MustCompile
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
re := match(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
var s string
for _, re := range res {
s += re.String()
}
return match(s)
}
// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `?`)
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
return match(group(expression(res...)).String() + `+`)
}
// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(?:` + expression(res...).String() + `)`)
}
// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
return match(`(` + expression(res...).String() + `)`)
}
// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
return match(`^` + expression(res...).String() + `$`)
}
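The combinators above are unexported, but the expressions they assemble are not. A quick sketch of the capture groups ReferenceRegexp exposes (group 1 name, group 2 tag, group 3 digest):

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func main() {
    m := reference.ReferenceRegexp.FindStringSubmatch("docker.io/library/redis:5")
    if m != nil {
        fmt.Println(m[1]) // docker.io/library/redis (name)
        fmt.Println(m[2]) // 5 (tag)
        fmt.Println(m[3]) // "" (no digest component)
    }
}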


@@ -1,97 +0,0 @@
package distribution
import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
)
// Scope defines the set of items that match a namespace.
type Scope interface {
// Contains returns true if the name belongs to the namespace.
Contains(name string) bool
}
type fullScope struct{}
func (f fullScope) Contains(string) bool {
return true
}
// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})
// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
// Scope describes the names that can be used with this Namespace. The
// global namespace will have a scope that matches all names. The scope
// effectively provides an identity for the namespace.
Scope() Scope
// Repository should return a reference to the named repository. The
// registry may or may not have the repository but should always return a
// reference.
Repository(ctx context.Context, name reference.Named) (Repository, error)
// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries
// which were filled. 'last' contains an offset in the catalog, and 'err' will be
// set to io.EOF if there are no more entries to obtain.
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
// Blobs returns a blob enumerator to access all blobs
Blobs() BlobEnumerator
// BlobStatter returns a BlobStatter to control
BlobStatter() BlobStatter
}
// RepositoryEnumerator describes an operation to enumerate repositories
type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}
// ManifestServiceOption is a function argument for ManifestService methods
type ManifestServiceOption interface {
Apply(ManifestService) error
}
// WithTag allows a tag to be passed into Put
func WithTag(tag string) ManifestServiceOption {
return WithTagOption{tag}
}
// WithTagOption holds a tag
type WithTagOption struct{ Tag string }
// Apply conforms to the ManifestServiceOption interface
func (o WithTagOption) Apply(m ManifestService) error {
// no implementation
return nil
}
// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
Named() reference.Named
// Manifests returns a reference to this repository's manifest service
// with the supplied options applied.
Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
// Blobs returns a reference to this repository's blob service.
Blobs(ctx context.Context) BlobStore
// TODO(stevvooe): The above BlobStore return can probably be relaxed to
// be a BlobService for use with clients. This will allow such
// implementations to avoid implementing ServeBlob.
// Tags returns a reference to this repository's tag service
Tags(ctx context.Context) TagService
}
// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.
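Scope is the simplest of these interfaces to implement. A hedged sketch of a custom scope admitting only one repository prefix; prefixScope is invented for illustration:

package main

import (
    "fmt"
    "strings"

    "github.com/docker/distribution"
)

// prefixScope matches every repository name under a fixed prefix.
type prefixScope string

func (p prefixScope) Contains(name string) bool {
    return strings.HasPrefix(name, string(p))
}

func main() {
    var s distribution.Scope = prefixScope("library/")
    fmt.Println(s.Contains("library/redis"), s.Contains("team/app")) // true false
}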


@@ -1,27 +0,0 @@
package distribution
import (
"github.com/docker/distribution/context"
)
// TagService provides access to information about tagged objects.
type TagService interface {
// Get retrieves the descriptor identified by the tag. Some
// implementations may differentiate between "trusted" tags and
// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
// as an ErrTagUntrusted error, with the target descriptor.
Get(ctx context.Context, tag string) (Descriptor, error)
// Tag associates the tag with the provided descriptor, updating the
// current association, if needed.
Tag(ctx context.Context, tag string, desc Descriptor) error
// Untag removes the given tag association
Untag(ctx context.Context, tag string) error
// All returns the set of tags managed by this tag service
All(ctx context.Context) ([]string, error)
// Lookup returns the set of tags referencing the given digest.
Lookup(ctx context.Context, digest Descriptor) ([]string, error)
}
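An illustrative in-memory implementation of the interface above; real tag services are backed by registry storage, and memTagService is invented for this sketch:

package tagservice

import (
    "sync"

    "github.com/docker/distribution"
    "github.com/docker/distribution/context"
)

// memTagService keeps tag-to-descriptor mappings in a mutex-guarded map.
type memTagService struct {
    mu   sync.RWMutex
    tags map[string]distribution.Descriptor
}

var _ distribution.TagService = (*memTagService)(nil)

func (s *memTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    desc, ok := s.tags[tag]
    if !ok {
        // ErrTagUnknown is defined in the errors file earlier in this diff.
        return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
    }
    return desc, nil
}

func (s *memTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.tags[tag] = desc
    return nil
}

func (s *memTagService) Untag(ctx context.Context, tag string) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    delete(s.tags, tag)
    return nil
}

func (s *memTagService) All(ctx context.Context) ([]string, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    out := make([]string, 0, len(s.tags))
    for t := range s.tags {
        out = append(out, t)
    }
    return out, nil
}

func (s *memTagService) Lookup(ctx context.Context, target distribution.Descriptor) ([]string, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    var out []string
    for t, desc := range s.tags {
        if desc.Digest == target.Digest {
            out = append(out, t)
        }
    }
    return out, nil
}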


@@ -1,43 +0,0 @@
github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a
github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870
github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6
golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b
golang.org/x/net 4876518f9e71663000c348837735820161a42df7
golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54
google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19
google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb


@@ -30,9 +30,7 @@ func MirrorStreamHandler(stream *Stream) {
}()
}
// NoopStreamHandler does nothing when stream connects, most
// likely used with RejectAuthHandler which will not allow any
// streams to make it to the stream handler.
// NoOpStreamHandler does nothing when stream connects.
func NoOpStreamHandler(stream *Stream) {
stream.SendReply(http.Header{}, false)
}

16
vendor/github.com/evanphx/json-patch/.travis.yml generated vendored Normal file

@@ -0,0 +1,16 @@
language: go
go:
- 1.8
- 1.7
install:
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
- go get github.com/jessevdk/go-flags
script:
- go get
- go test -cover ./...
notifications:
email: false

25
vendor/github.com/evanphx/json-patch/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2014, Evan Phoenix
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
