Merge pull request #3498 from aledbf/update-godeps

Update godeps
This commit is contained in:
Kubernetes Prow Robot 2018-12-05 09:07:08 -08:00 committed by GitHub
commit be3ff42858
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
1333 changed files with 71576 additions and 91543 deletions

392
Gopkg.lock generated
View file

@ -2,15 +2,23 @@
[[projects]] [[projects]]
digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57" digest = "1:5ad08b0e14866764a6d7475eb11c9cf05cad9a52c442593bdfa544703ff77f61"
name = "cloud.google.com/go" name = "cloud.google.com/go"
packages = ["compute/metadata"] packages = ["compute/metadata"]
pruneopts = "NUT" pruneopts = "NUT"
revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374" revision = "74b12019e2aa53ec27882158f59192d7cd6d1998"
version = "v0.28.0" version = "v0.33.1"
[[projects]] [[projects]]
digest = "1:9fe4851c1eb1ab8c7486fee4e2d06db0e6509d6772211177e631c1abfb41b720" digest = "1:f323f98930459f65f4699b5bfba563743680e2633d96e72d61a9732648e2d07c"
name = "contrib.go.opencensus.io/exporter/ocagent"
packages = ["."]
pruneopts = "NUT"
revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d"
version = "v0.2.0"
[[projects]]
digest = "1:fff60f8e65c264c3fe391e671cd84ce292a748c0a3fa19cdcef0cc34ffc123ae"
name = "github.com/Azure/go-autorest" name = "github.com/Azure/go-autorest"
packages = [ packages = [
"autorest", "autorest",
@ -18,19 +26,19 @@
"autorest/azure", "autorest/azure",
"autorest/date", "autorest/date",
"logger", "logger",
"version", "tracing",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6" revision = "f401b1ccc8eb505927fae7a0c7f6406d37ca1c7e"
version = "v10.15.5" version = "v11.2.8"
[[projects]] [[projects]]
digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1" digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406"
name = "github.com/Sirupsen/logrus" name = "github.com/Sirupsen/logrus"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d" revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
version = "v1.1.0" version = "v1.2.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -48,6 +56,19 @@
pruneopts = "NUT" pruneopts = "NUT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb" revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:65b0d980b428a6ad4425f2df4cd5410edd81f044cf527bd1c345368444649e58"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/agent/trace/v1",
"gen-go/resource/v1",
"gen-go/trace/v1",
]
pruneopts = "NUT"
revision = "7f2434bc10da710debe5c4315ed6d4df454b4024"
version = "v0.1.0"
[[projects]] [[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew" name = "github.com/davecgh/go-spew"
@ -64,16 +85,6 @@
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0" version = "v3.2.0"
[[projects]]
digest = "1:4189ee6a3844f555124d9d2656fe7af02fca961c2a9bad9074789df13a0c62e0"
name = "github.com/docker/distribution"
packages = [
"digestset",
"reference",
]
pruneopts = "NUT"
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
[[projects]] [[projects]]
digest = "1:4340101f42556a9cb2f7a360a0e95a019bfef6247d92e6c4c46f2433cf86a482" digest = "1:4340101f42556a9cb2f7a360a0e95a019bfef6247d92e6c4c46f2433cf86a482"
name = "github.com/docker/go-units" name = "github.com/docker/go-units"
@ -84,14 +95,14 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:da25cf063072a10461c19320e82117d85f9d60be4c95a62bc8d5a49acf7d0ca5" digest = "1:eb8f1b1913bffd6e788deee9fe4ba3a4d83267aff6045d3be33105e35ece290b"
name = "github.com/docker/spdystream" name = "github.com/docker/spdystream"
packages = [ packages = [
".", ".",
"spdy", "spdy",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db" revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -109,6 +120,14 @@
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0" version = "v1.1.0"
[[projects]]
digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
name = "github.com/evanphx/json-patch"
packages = ["."]
pruneopts = "NUT"
revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
version = "v4.1.0"
[[projects]] [[projects]]
digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
name = "github.com/fsnotify/fsnotify" name = "github.com/fsnotify/fsnotify"
@ -125,14 +144,6 @@
pruneopts = "NUT" pruneopts = "NUT"
revision = "8306686428a5fe132eac8cb7c4848af725098bd4" revision = "8306686428a5fe132eac8cb7c4848af725098bd4"
[[projects]]
digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]] [[projects]]
digest = "1:cd4f86461732066e277335465962660cbf02999e18d5bbb5e9285eac4608b970" digest = "1:cd4f86461732066e277335465962660cbf02999e18d5bbb5e9285eac4608b970"
name = "github.com/gogo/protobuf" name = "github.com/gogo/protobuf"
@ -146,24 +157,16 @@
revision = "636bf0302bc95575d69441b25a2603156ffdddf1" revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1" version = "v1.1.1"
[[projects]]
branch = "master"
digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "NUT"
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8"
name = "github.com/golang/groupcache" name = "github.com/golang/groupcache"
packages = ["lru"] packages = ["lru"]
pruneopts = "NUT" pruneopts = "NUT"
revision = "6f2cf27854a4a29e3811b0371547be335d411b8b" revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa"
[[projects]] [[projects]]
digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62" digest = "1:479e958ad7ae540d7a3c565d1839cc7c8ab9b627640144443f1e88d11d4023d0"
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
packages = [ packages = [
"proto", "proto",
@ -171,6 +174,7 @@
"ptypes/any", "ptypes/any",
"ptypes/duration", "ptypes/duration",
"ptypes/timestamp", "ptypes/timestamp",
"ptypes/wrappers",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
@ -193,12 +197,12 @@
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]] [[projects]]
digest = "1:a1578f7323eca2b88021fdc9a79a99833d40b12c32a5ea4f284e2fad19ea2657" digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
name = "github.com/google/uuid" name = "github.com/google/uuid"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "d460ce9f8df2e77fb1ba55ca87fafed96c607494" revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
version = "v1.0.0" version = "v1.1.0"
[[projects]] [[projects]]
digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
@ -214,7 +218,7 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:8ccbd0e8ef8b6e59b331991fcb5477fb5e6c51b7d1be09789d7dc0a33e95bf6a" digest = "1:3f5485f5f0ea50de409c25414eaf4154bd54b2fa9ef03fc4a0a278967daac906"
name = "github.com/gophercloud/gophercloud" name = "github.com/gophercloud/gophercloud"
packages = [ packages = [
".", ".",
@ -226,7 +230,7 @@
"pagination", "pagination",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "39db44929bf62867520de8607bd519217a78f802" revision = "8f4eb476f72cbb430de821134208c549a643e99b"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -237,7 +241,7 @@
"diskcache", "diskcache",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "9cad4c3443a7200dd6400aef47183728de563a38" revision = "c63ab54fda8f77302f8d414e19933f2b6026a089"
[[projects]] [[projects]]
digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
@ -281,12 +285,12 @@
version = "v1.1.5" version = "v1.1.5"
[[projects]] [[projects]]
branch = "master" digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
digest = "1:c8a452cc8dd4ef9f857570ce2be31ca257a0928bf3c2b08cd7e11972b985c6d7"
name = "github.com/konsorten/go-windows-terminal-sequences" name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5" revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
version = "v1.0.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -325,11 +329,11 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:5fe20cfe4ef484c237cec9f947b2a6fa90bad4b8610fd014f0e4211e13d82d5d" digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
name = "github.com/mitchellh/mapstructure" name = "github.com/mitchellh/mapstructure"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "fa473d140ef3c6adf42d6b391fe76707f1f243c8" revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
[[projects]] [[projects]]
digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
@ -348,27 +352,47 @@
version = "1.0.1" version = "1.0.1"
[[projects]] [[projects]]
branch = "master"
digest = "1:af70f88d68d35881112c13fa94ae3fcb53cfe14c4b8fb3d87a345bbf442d2747" digest = "1:af70f88d68d35881112c13fa94ae3fcb53cfe14c4b8fb3d87a345bbf442d2747"
name = "github.com/moul/http2curl" name = "github.com/moul/http2curl"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d" revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d"
version = "v1.0.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:7b782f694db508189ee26915bb12dcb74aefd5d96411ad8b04a12d880df1b810" digest = "1:9e33629d4ec9e9344715a54fa0a107f23ce800deb13999b0190df04c3540ccb5"
name = "github.com/ncabatoff/go-seq"
packages = ["seq"]
pruneopts = "NUT"
revision = "b08ef85ed83364cba413c98a94bbd4169a0ce70b"
[[projects]]
branch = "master"
digest = "1:fcdc1a06529f364e1ba0e8a85540ae7ebbbe2e4b00d40245b24d1b8a3907b2e6"
name = "github.com/ncabatoff/process-exporter" name = "github.com/ncabatoff/process-exporter"
packages = [ packages = [
".", ".",
"proc", "proc",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "5917bc766b95a1fa3c2ae85340f4de02a6b7e15e" revision = "bdf24ef23850ba2fc593f244256482375f7cbfcd"
source = "github.com/aledbf/process-exporter"
[[projects]] [[projects]]
digest = "1:0be1cd4c73d5e22a30edcf32a18e9809a370a7a4a52c4f41a86070b34da93fef" branch = "add-proc-status"
digest = "1:75f0f2e92ea523185b28b9377984503a7b2be7c7451831eca5478b20998a3799"
name = "github.com/ncabatoff/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs",
]
pruneopts = "NUT"
revision = "e1a38cb53622f65e073c5e750e6498a44ebfbd2a"
[[projects]]
digest = "1:cdc5cfc04dd0b98f86433207fe6d9879757c46734441a08886acc11251c7ed4a"
name = "github.com/onsi/ginkgo" name = "github.com/onsi/ginkgo"
packages = [ packages = [
".", ".",
@ -391,11 +415,11 @@
"types", "types",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "3774a09d95489ccaa16032e0770d08ea77ba6184" revision = "2e1be8f7d90e9d3e3e58b0ce470f2f14d075406f"
version = "v1.6.0" version = "v1.7.0"
[[projects]] [[projects]]
digest = "1:95f40a9db820078d1795c7ba2d476016aca05dc4267eaf6752a925e437cb351f" digest = "1:0db10c512b410a1ecd8845e31db52622c08b76198f7ab76afb25319c84a7fd4b"
name = "github.com/onsi/gomega" name = "github.com/onsi/gomega"
packages = [ packages = [
".", ".",
@ -412,16 +436,8 @@
"types", "types",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "7615b9433f86a8bdf29709bf288bc4fd0636a369" revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd"
version = "v1.4.2" version = "v1.4.3"
[[projects]]
digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0"
name = "github.com/opencontainers/go-digest"
packages = ["."]
pruneopts = "NUT"
revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf"
version = "v1.0.0-rc1"
[[projects]] [[projects]]
digest = "1:c9e0e109a897ef306f865e55e07ecb1c3024edafd103e1c2b1a06a852dc91cb3" digest = "1:c9e0e109a897ef306f865e55e07ecb1c3024edafd103e1c2b1a06a852dc91cb3"
@ -484,7 +500,7 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:3b374d4b78e68dbab0cc07ce56e75d8b267ce1ac1de6c7b610191789abd3fee1" digest = "1:69c96554af95dcf0afbabae18949708bd165a5c8393ee6e7299fc5a583f595a4"
name = "github.com/prometheus/client_golang" name = "github.com/prometheus/client_golang"
packages = [ packages = [
"prometheus", "prometheus",
@ -492,7 +508,7 @@
"prometheus/promhttp", "prometheus/promhttp",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "2d5a6493f89f13c9779c2e097e0710e0f4478bae" revision = "32b1bb4674c4db541df5b547bb1a325337522022"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -504,7 +520,7 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b" digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea"
name = "github.com/prometheus/common" name = "github.com/prometheus/common"
packages = [ packages = [
"expfmt", "expfmt",
@ -512,11 +528,11 @@
"model", "model",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" revision = "4724e9255275ce38f7179b2478abeae4e28c904f"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0" digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6"
name = "github.com/prometheus/procfs" name = "github.com/prometheus/procfs"
packages = [ packages = [
".", ".",
@ -525,7 +541,7 @@
"xfs", "xfs",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2" revision = "aa55a523dc0a8297edf51bb75e8eec13eb3be45d"
[[projects]] [[projects]]
digest = "1:330e9062b308ac597e28485699c02223bd052437a6eed32a173c9227dcb9d95a" digest = "1:330e9062b308ac597e28485699c02223bd052437a6eed32a173c9227dcb9d95a"
@ -539,12 +555,12 @@
version = "v1.1.2" version = "v1.1.2"
[[projects]] [[projects]]
digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675" digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
name = "github.com/spf13/pflag" name = "github.com/spf13/pflag"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.2" version = "v1.0.3"
[[projects]] [[projects]]
digest = "1:c10994a08ed2ff2cc7611d03ded8bb5f782096880b2daab391adbd9ab95a1764" digest = "1:c10994a08ed2ff2cc7611d03ded8bb5f782096880b2daab391adbd9ab95a1764"
@ -555,21 +571,40 @@
version = "1.0.2" version = "1.0.2"
[[projects]] [[projects]]
branch = "master" digest = "1:f2805adeca595d7dbd25173b57f83daaa79f44d43475263c4e34b05020eac9a7"
digest = "1:8e241498e35f550e5192ee6b1f6ff2c0a7ffe81feff9541d297facffe1383979" name = "go.opencensus.io"
name = "golang.org/x/crypto"
packages = [ packages = [
"ed25519", ".",
"ed25519/internal/edwards25519", "exemplar",
"pbkdf2", "internal",
"ssh/terminal", "internal/tagencoding",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"plugin/ochttp/propagation/tracecontext",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "5295e8364332db77d75fce11f1d19c053919a9c9" revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b"
version = "v0.18.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:f7468b77e2eb541c768de1f12e3ad98debb07f1d49409f40f49b898588eca448" digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "eb0de9b17e854e9b1ccd9963efafc79862359959"
[[projects]]
branch = "master"
digest = "1:3441e889d2a118498de288fcfe0eb1726831a76c3285af5357674eb1cc150a0f"
name = "golang.org/x/net" name = "golang.org/x/net"
packages = [ packages = [
"context", "context",
@ -586,11 +621,11 @@
"trace", "trace",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de" revision = "351d144fa1fc0bd934e2408202be0c29f25e35a0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b" digest = "1:169c27544e7f54a861a05cd84078b494eaf3e41543f0dfd4dd215fa32139cd40"
name = "golang.org/x/oauth2" name = "golang.org/x/oauth2"
packages = [ packages = [
".", ".",
@ -600,18 +635,26 @@
"jwt", "jwt",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" revision = "28207608b83849a028d4f12e46533a6b6894ecaf"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:c8c9c630940822c796f2cdb5f7218d9710ea7d544f379813d8680f07590c2fee" digest = "1:5e4d81c50cffcb124b899e4f3eabec3930c73532f0096c27f94476728ba03028"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = "NUT"
revision = "42b317875d0fa942474b76e1b46a6060d720ae6e"
[[projects]]
branch = "master"
digest = "1:6ddfd101211f81df3ba1f474baf1c451f7708f01c1e0c4be49cd9f0af03596cf"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = [ packages = [
"unix", "unix",
"windows", "windows",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "dad3d9fb7b6e83d0f9ac8f54670f6334c3a287b4" revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31"
[[projects]] [[projects]]
digest = "1:8a12cbc891b7130d3f660f8a309e5c0b083f831e6ac38cdaa1f12e63c12d6bea" digest = "1:8a12cbc891b7130d3f660f8a309e5c0b083f831e6ac38cdaa1f12e63c12d6bea"
@ -650,14 +693,22 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
name = "golang.org/x/time" name = "golang.org/x/time"
packages = ["rate"] packages = ["rate"]
pruneopts = "NUT" pruneopts = "NUT"
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
[[projects]] [[projects]]
digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f" branch = "master"
digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = "NUT"
revision = "af4fc4062c262223ddc2d92f5f35a93690db383a"
[[projects]]
digest = "1:b63b351b57e64ae8aec1af030dc5e74a2676fcda62102f006ae4411fac1b04c8"
name = "google.golang.org/appengine" name = "google.golang.org/appengine"
packages = [ packages = [
".", ".",
@ -672,8 +723,8 @@
"urlfetch", "urlfetch",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06" revision = "4a4468ece617fc8205e99368fa2200e9d1fad421"
version = "v1.2.0" version = "v1.3.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -681,10 +732,10 @@
name = "google.golang.org/genproto" name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"] packages = ["googleapis/rpc/status"]
pruneopts = "NUT" pruneopts = "NUT"
revision = "0e822944c569bf5c9afd034adaa56208bd2906ac" revision = "31ac5d88444a9e7ad18077db9a165d793ad06a2e"
[[projects]] [[projects]]
digest = "1:5b805b8e03b29399b344655cac16873f026e54dc0a7c17b381f6f4d4c7b6d741" digest = "1:2f91d3e11b666570f8c923912f1cc8cf2f0c6b7371b2687ee67a8f73f08c6272"
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
packages = [ packages = [
".", ".",
@ -715,8 +766,8 @@
"tap", "tap",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" revision = "2e463a05d100327ca47ac218281906921038fd95"
version = "v1.15.0" version = "v1.16.0"
[[projects]] [[projects]]
digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
@ -751,19 +802,6 @@
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1" version = "v0.9.1"
[[projects]]
digest = "1:812f9446bc99ebd1c66c55fa456ff7843f7105d22f11f0a2098bced37e9c6d32"
name = "gopkg.in/square/go-jose.v2"
packages = [
".",
"cipher",
"json",
"jwt",
]
pruneopts = "NUT"
revision = "ef984e69dd356202fd4e4910d4d9c24468bdf0b8"
version = "v2.1.9"
[[projects]] [[projects]]
branch = "v1" branch = "v1"
digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1" digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1"
@ -773,15 +811,15 @@
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
[[projects]] [[projects]]
digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082" digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
name = "gopkg.in/yaml.v2" name = "gopkg.in/yaml.v2"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.1" version = "v2.2.2"
[[projects]] [[projects]]
digest = "1:c6b0cfc418f5e8bb9169d78f1024cb1c9f3e61c9a76235134d26d3f28ecdf27b" digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
name = "k8s.io/api" name = "k8s.io/api"
packages = [ packages = [
"admissionregistration/v1alpha1", "admissionregistration/v1alpha1",
@ -789,6 +827,7 @@
"apps/v1", "apps/v1",
"apps/v1beta1", "apps/v1beta1",
"apps/v1beta2", "apps/v1beta2",
"auditregistration/v1alpha1",
"authentication/v1", "authentication/v1",
"authentication/v1beta1", "authentication/v1beta1",
"authorization/v1", "authorization/v1",
@ -817,10 +856,10 @@
"storage/v1beta1", "storage/v1beta1",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[projects]] [[projects]]
digest = "1:6863750b53ac3e57c4ea2b068c6c07d3b42a6dc965f64c5fdd56ae2ab768deb5" digest = "1:501a73762f1b2c4530206ffb657b39d8b58a9b40280d30e4509ae1232767962c"
name = "k8s.io/apiextensions-apiserver" name = "k8s.io/apiextensions-apiserver"
packages = [ packages = [
"pkg/apis/apiextensions", "pkg/apis/apiextensions",
@ -828,24 +867,20 @@
"pkg/client/clientset/clientset", "pkg/client/clientset/clientset",
"pkg/client/clientset/clientset/scheme", "pkg/client/clientset/clientset/scheme",
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1", "pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
"pkg/features",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[projects]] [[projects]]
digest = "1:f48552d381c18aebe56e96a8b0478430def42a30ac20945f769eb90ada979b52" digest = "1:692e27ed8a5eb2d74bde52d323d428814cd9a6e0f726d02ffd60fda7819e1ee7"
name = "k8s.io/apimachinery" name = "k8s.io/apimachinery"
packages = [ packages = [
"pkg/api/equality",
"pkg/api/errors", "pkg/api/errors",
"pkg/api/meta", "pkg/api/meta",
"pkg/api/resource", "pkg/api/resource",
"pkg/api/validation",
"pkg/apis/meta/internalversion", "pkg/apis/meta/internalversion",
"pkg/apis/meta/v1", "pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured", "pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1/validation",
"pkg/apis/meta/v1beta1", "pkg/apis/meta/v1beta1",
"pkg/conversion", "pkg/conversion",
"pkg/conversion/queryparams", "pkg/conversion/queryparams",
@ -873,7 +908,6 @@
"pkg/util/mergepatch", "pkg/util/mergepatch",
"pkg/util/naming", "pkg/util/naming",
"pkg/util/net", "pkg/util/net",
"pkg/util/rand",
"pkg/util/remotecommand", "pkg/util/remotecommand",
"pkg/util/runtime", "pkg/util/runtime",
"pkg/util/sets", "pkg/util/sets",
@ -890,25 +924,20 @@
"third_party/forked/golang/reflect", "third_party/forked/golang/reflect",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[projects]] [[projects]]
digest = "1:8eaab7022d2018a1e398e049328f9ae6c35812bb3373441194f12d916d3b8140" digest = "1:cc0487260dc4ffb2b513273ad8438497b8df2d8c0de90aaf03d22cc5b58e3fe1"
name = "k8s.io/apiserver" name = "k8s.io/apiserver"
packages = [ packages = [
"pkg/authentication/authenticator",
"pkg/authentication/serviceaccount",
"pkg/authentication/user",
"pkg/features",
"pkg/server/healthz", "pkg/server/healthz",
"pkg/util/feature",
"pkg/util/logs", "pkg/util/logs",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[projects]] [[projects]]
digest = "1:8e32eb6edca8a05b29222291da9447034c71d7524705b28db7d34366fb393b3c" digest = "1:14961132526c5e588ccfa30efd6c977db308c1b1fb83ad4043c3a92c961521ae"
name = "k8s.io/client-go" name = "k8s.io/client-go"
packages = [ packages = [
"discovery", "discovery",
@ -921,6 +950,8 @@
"informers/apps/v1", "informers/apps/v1",
"informers/apps/v1beta1", "informers/apps/v1beta1",
"informers/apps/v1beta2", "informers/apps/v1beta2",
"informers/auditregistration",
"informers/auditregistration/v1alpha1",
"informers/autoscaling", "informers/autoscaling",
"informers/autoscaling/v1", "informers/autoscaling/v1",
"informers/autoscaling/v2beta1", "informers/autoscaling/v2beta1",
@ -970,6 +1001,8 @@
"kubernetes/typed/apps/v1beta1/fake", "kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2", "kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake", "kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/auditregistration/v1alpha1",
"kubernetes/typed/auditregistration/v1alpha1/fake",
"kubernetes/typed/authentication/v1", "kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake", "kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1", "kubernetes/typed/authentication/v1beta1",
@ -1027,6 +1060,7 @@
"listers/apps/v1", "listers/apps/v1",
"listers/apps/v1beta1", "listers/apps/v1beta1",
"listers/apps/v1beta2", "listers/apps/v1beta2",
"listers/auditregistration/v1alpha1",
"listers/autoscaling/v1", "listers/autoscaling/v1",
"listers/autoscaling/v2beta1", "listers/autoscaling/v2beta1",
"listers/autoscaling/v2beta2", "listers/autoscaling/v2beta2",
@ -1076,12 +1110,10 @@
"tools/record", "tools/record",
"tools/reference", "tools/reference",
"tools/remotecommand", "tools/remotecommand",
"tools/watch",
"transport", "transport",
"transport/spdy", "transport/spdy",
"util/buffer", "util/buffer",
"util/cert", "util/cert",
"util/cert/triple",
"util/connrotation", "util/connrotation",
"util/exec", "util/exec",
"util/flowcontrol", "util/flowcontrol",
@ -1092,11 +1124,19 @@
"util/workqueue", "util/workqueue",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:9ca673cabdf1a203b841f27246e7f1211a6839a251bcd34c47d90a8e170fcf27" digest = "1:aac84f18a5f8ef84b0048e4d8856521ef3d33cd50fd839326fa92befcd92bfd4"
name = "k8s.io/cloud-provider"
packages = ["."]
pruneopts = "NUT"
revision = "9b77dc1c384685cb732b3025ed5689dd597a5971"
[[projects]]
branch = "master"
digest = "1:741fc393d821a1acb7f8c85cd3cf8675f2c7f08a47096c4ca7ef6229f5acc763"
name = "k8s.io/csi-api" name = "k8s.io/csi-api"
packages = [ packages = [
"pkg/apis/csi/v1alpha1", "pkg/apis/csi/v1alpha1",
@ -1105,83 +1145,64 @@
"pkg/client/clientset/versioned/typed/csi/v1alpha1", "pkg/client/clientset/versioned/typed/csi/v1alpha1",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "31ae05d8096db803f5b4ff16cda6059c0a9cc861" revision = "61a1735c3f5028c6dd5dc37e0f5573d8872507ba"
[[projects]]
digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b"
name = "k8s.io/klog"
packages = ["."]
pruneopts = "NUT"
revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
version = "v0.1.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54" digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
name = "k8s.io/kube-openapi" name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"] packages = ["pkg/util/proto"]
pruneopts = "NUT" pruneopts = "NUT"
revision = "e3762e86a74c878ffed47484592986685639c2cd" revision = "0317810137be915b9cf888946c6e115c1bfac693"
[[projects]] [[projects]]
digest = "1:75158cef6a899b0b047de1cb2b3a585eca459e64e797f99d89261b92674b8c2b" digest = "1:df9bd8d59539e980d5d565b00a6d45e445e62d8c89d60fe0fa520ad6e54d32e4"
name = "k8s.io/kubernetes" name = "k8s.io/kubernetes"
packages = [ packages = [
"pkg/api/legacyscheme", "pkg/api/legacyscheme",
"pkg/api/service",
"pkg/api/v1/pod", "pkg/api/v1/pod",
"pkg/apis/autoscaling",
"pkg/apis/core",
"pkg/apis/core/helper",
"pkg/apis/core/install",
"pkg/apis/core/pods",
"pkg/apis/core/v1",
"pkg/apis/core/v1/helper",
"pkg/apis/core/validation",
"pkg/apis/extensions",
"pkg/apis/networking",
"pkg/apis/policy",
"pkg/apis/scheduling",
"pkg/capabilities",
"pkg/cloudprovider",
"pkg/controller",
"pkg/features",
"pkg/fieldpath",
"pkg/kubelet/apis",
"pkg/kubelet/apis/cri/runtime/v1alpha2", "pkg/kubelet/apis/cri/runtime/v1alpha2",
"pkg/kubelet/container", "pkg/kubelet/container",
"pkg/kubelet/types",
"pkg/kubelet/util/format", "pkg/kubelet/util/format",
"pkg/kubelet/util/sliceutils", "pkg/kubelet/util/sliceutils",
"pkg/master/ports",
"pkg/scheduler/algorithm",
"pkg/scheduler/algorithm/priorities/util",
"pkg/scheduler/api",
"pkg/scheduler/cache",
"pkg/scheduler/util",
"pkg/security/apparmor",
"pkg/serviceaccount",
"pkg/util/file", "pkg/util/file",
"pkg/util/filesystem", "pkg/util/filesystem",
"pkg/util/hash", "pkg/util/hash",
"pkg/util/io", "pkg/util/io",
"pkg/util/mount", "pkg/util/mount",
"pkg/util/net/sets",
"pkg/util/node",
"pkg/util/nsenter", "pkg/util/nsenter",
"pkg/util/parsers",
"pkg/util/sysctl", "pkg/util/sysctl",
"pkg/util/taints",
"pkg/volume", "pkg/volume",
"pkg/volume/util/fs", "pkg/volume/util/fs",
"pkg/volume/util/recyclerclient", "pkg/volume/util/recyclerclient",
"third_party/forked/golang/expansion", "third_party/forked/golang/expansion",
] ]
pruneopts = "NUT" pruneopts = "NUT"
revision = "v1.12.1" revision = "v1.13.0-rc.2"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:b8bb2923aa316490408300d029eeb4f566d54269e91242eeef680c70c2a1c041" digest = "1:381323c2fe2e890a3dd3b5d6dc6f2199068408cca89b24f6b7ca1c60f32644a5"
name = "k8s.io/utils" name = "k8s.io/utils"
packages = [ packages = ["exec"]
"exec",
"pointer",
]
pruneopts = "NUT" pruneopts = "NUT"
revision = "cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb" revision = "0d26856f57b32ec3398579285e5c8a2bfe8c5243"
[[projects]]
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
name = "sigs.k8s.io/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
version = "v1.1.0"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
@ -1189,7 +1210,6 @@
input-imports = [ input-imports = [
"github.com/armon/go-proxyproto", "github.com/armon/go-proxyproto",
"github.com/eapache/channels", "github.com/eapache/channels",
"github.com/golang/glog",
"github.com/imdario/mergo", "github.com/imdario/mergo",
"github.com/json-iterator/go", "github.com/json-iterator/go",
"github.com/kylelemons/godebug/pretty", "github.com/kylelemons/godebug/pretty",
@ -1221,6 +1241,7 @@
"k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/fields", "k8s.io/apimachinery/pkg/fields",
"k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/labels",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/apimachinery/pkg/runtime/schema", "k8s.io/apimachinery/pkg/runtime/schema",
"k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/util/intstr",
"k8s.io/apimachinery/pkg/util/runtime", "k8s.io/apimachinery/pkg/util/runtime",
@ -1228,6 +1249,7 @@
"k8s.io/apimachinery/pkg/util/uuid", "k8s.io/apimachinery/pkg/util/uuid",
"k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/util/wait",
"k8s.io/apimachinery/pkg/version", "k8s.io/apimachinery/pkg/version",
"k8s.io/apimachinery/pkg/watch",
"k8s.io/apiserver/pkg/server/healthz", "k8s.io/apiserver/pkg/server/healthz",
"k8s.io/apiserver/pkg/util/logs", "k8s.io/apiserver/pkg/util/logs",
"k8s.io/client-go/informers", "k8s.io/client-go/informers",
@ -1244,9 +1266,9 @@
"k8s.io/client-go/tools/leaderelection/resourcelock", "k8s.io/client-go/tools/leaderelection/resourcelock",
"k8s.io/client-go/tools/record", "k8s.io/client-go/tools/record",
"k8s.io/client-go/util/cert", "k8s.io/client-go/util/cert",
"k8s.io/client-go/util/cert/triple",
"k8s.io/client-go/util/flowcontrol", "k8s.io/client-go/util/flowcontrol",
"k8s.io/client-go/util/workqueue", "k8s.io/client-go/util/workqueue",
"k8s.io/klog",
"k8s.io/kubernetes/pkg/api/v1/pod", "k8s.io/kubernetes/pkg/api/v1/pod",
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils", "k8s.io/kubernetes/pkg/kubelet/util/sliceutils",
"k8s.io/kubernetes/pkg/util/filesystem", "k8s.io/kubernetes/pkg/util/filesystem",

View file

@ -39,6 +39,10 @@
name = "gopkg.in/fsnotify.v1" name = "gopkg.in/fsnotify.v1"
source = "https://github.com/fsnotify/fsnotify.git" source = "https://github.com/fsnotify/fsnotify.git"
[[override]]
name = "github.com/golang/glog"
source = "k8s.io/klog/glog"
[[constraint]] [[constraint]]
name = "github.com/eapache/channels" name = "github.com/eapache/channels"
branch = "master" branch = "master"
@ -47,10 +51,6 @@
branch = "master" branch = "master"
name = "github.com/armon/go-proxyproto" name = "github.com/armon/go-proxyproto"
[[constraint]]
branch = "master"
name = "github.com/golang/glog"
[[constraint]] [[constraint]]
name = "github.com/imdario/mergo" name = "github.com/imdario/mergo"
version = "0.2.4" version = "0.2.4"
@ -69,7 +69,6 @@
[[constraint]] [[constraint]]
name = "github.com/ncabatoff/process-exporter" name = "github.com/ncabatoff/process-exporter"
source = "github.com/aledbf/process-exporter"
branch = "master" branch = "master"
[[constraint]] [[constraint]]
@ -84,10 +83,6 @@
name = "github.com/prometheus/client_golang" name = "github.com/prometheus/client_golang"
branch = "master" branch = "master"
[[constraint]]
name = "github.com/spf13/pflag"
version = "1.0.0"
[[constraint]] [[constraint]]
name = "github.com/zakjan/cert-chain-resolver" name = "github.com/zakjan/cert-chain-resolver"
version = "1.0.1" version = "1.0.1"
@ -102,24 +97,24 @@
[[constraint]] [[constraint]]
name = "k8s.io/kubernetes" name = "k8s.io/kubernetes"
revision = "v1.12.1" revision = "v1.13.0-rc.2"
[[constraint]] [[constraint]]
name = "k8s.io/api" name = "k8s.io/api"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[constraint]] [[constraint]]
name = "k8s.io/apimachinery" name = "k8s.io/apimachinery"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[constraint]] [[constraint]]
name = "k8s.io/client-go" name = "k8s.io/client-go"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[constraint]] [[constraint]]
name = "k8s.io/apiextensions-apiserver" name = "k8s.io/apiextensions-apiserver"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"
[[constraint]] [[constraint]]
name = "k8s.io/apiserver" name = "k8s.io/apiserver"
revision = "kubernetes-1.12.1" revision = "kubernetes-1.13.0-rc.2"

View file

@ -21,10 +21,10 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/golang/glog"
"github.com/spf13/pflag" "github.com/spf13/pflag"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
"k8s.io/klog"
"k8s.io/ingress-nginx/internal/ingress/annotations/class" "k8s.io/ingress-nginx/internal/ingress/annotations/class"
"k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/annotations/parser"
@ -168,7 +168,7 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en
flag.CommandLine.Parse([]string{}) flag.CommandLine.Parse([]string{})
pflag.VisitAll(func(flag *pflag.Flag) { pflag.VisitAll(func(flag *pflag.Flag) {
glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
}) })
if *showVersion { if *showVersion {
@ -176,10 +176,10 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en
} }
if *ingressClass != "" { if *ingressClass != "" {
glog.Infof("Watching for Ingress class: %s", *ingressClass) klog.Infof("Watching for Ingress class: %s", *ingressClass)
if *ingressClass != class.DefaultClass { if *ingressClass != class.DefaultClass {
glog.Warningf("Only Ingresses with class %q will be processed by this Ingress controller", *ingressClass) klog.Warningf("Only Ingresses with class %q will be processed by this Ingress controller", *ingressClass)
} }
class.IngressClass = *ingressClass class.IngressClass = *ingressClass
@ -209,7 +209,7 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en
} }
if !*enableSSLChainCompletion { if !*enableSSLChainCompletion {
glog.Warningf("SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)") klog.Warningf("SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)")
} }
if *enableSSLChainCompletion && *dynamicCertificatesEnabled { if *enableSSLChainCompletion && *dynamicCertificatesEnabled {

View file

@ -28,7 +28,6 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
@ -38,6 +37,7 @@ import (
"k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/healthz"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress/controller" "k8s.io/ingress-nginx/internal/ingress/controller"
@ -59,6 +59,8 @@ const (
) )
func main() { func main() {
klog.InitFlags(nil)
rand.Seed(time.Now().UnixNano()) rand.Seed(time.Now().UnixNano())
fmt.Println(version.String()) fmt.Println(version.String())
@ -69,14 +71,14 @@ func main() {
} }
if err != nil { if err != nil {
glog.Fatal(err) klog.Fatal(err)
} }
nginxVersion() nginxVersion()
fs, err := file.NewLocalFS() fs, err := file.NewLocalFS()
if err != nil { if err != nil {
glog.Fatal(err) klog.Fatal(err)
} }
kubeClient, err := createApiserverClient(conf.APIServerHost, conf.KubeConfigFile) kubeClient, err := createApiserverClient(conf.APIServerHost, conf.KubeConfigFile)
@ -87,24 +89,24 @@ func main() {
if len(conf.DefaultService) > 0 { if len(conf.DefaultService) > 0 {
defSvcNs, defSvcName, err := k8s.ParseNameNS(conf.DefaultService) defSvcNs, defSvcName, err := k8s.ParseNameNS(conf.DefaultService)
if err != nil { if err != nil {
glog.Fatal(err) klog.Fatal(err)
} }
_, err = kubeClient.CoreV1().Services(defSvcNs).Get(defSvcName, metav1.GetOptions{}) _, err = kubeClient.CoreV1().Services(defSvcNs).Get(defSvcName, metav1.GetOptions{})
if err != nil { if err != nil {
// TODO (antoineco): compare with error types from k8s.io/apimachinery/pkg/api/errors // TODO (antoineco): compare with error types from k8s.io/apimachinery/pkg/api/errors
if strings.Contains(err.Error(), "cannot get services in the namespace") { if strings.Contains(err.Error(), "cannot get services in the namespace") {
glog.Fatal("✖ The cluster seems to be running with a restrictive Authorization mode and the Ingress controller does not have the required permissions to operate normally.") klog.Fatal("✖ The cluster seems to be running with a restrictive Authorization mode and the Ingress controller does not have the required permissions to operate normally.")
} }
glog.Fatalf("No service with name %v found: %v", conf.DefaultService, err) klog.Fatalf("No service with name %v found: %v", conf.DefaultService, err)
} }
glog.Infof("Validated %v as the default backend.", conf.DefaultService) klog.Infof("Validated %v as the default backend.", conf.DefaultService)
} }
if conf.Namespace != "" { if conf.Namespace != "" {
_, err = kubeClient.CoreV1().Namespaces().Get(conf.Namespace, metav1.GetOptions{}) _, err = kubeClient.CoreV1().Namespaces().Get(conf.Namespace, metav1.GetOptions{})
if err != nil { if err != nil {
glog.Fatalf("No namespace with name %v found: %v", conf.Namespace, err) klog.Fatalf("No namespace with name %v found: %v", conf.Namespace, err)
} }
} }
@ -112,7 +114,7 @@ func main() {
defCert, defKey := ssl.GetFakeSSLCert() defCert, defKey := ssl.GetFakeSSLCert()
c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}, fs) c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}, fs)
if err != nil { if err != nil {
glog.Fatalf("Error generating self-signed certificate: %v", err) klog.Fatalf("Error generating self-signed certificate: %v", err)
} }
conf.FakeCertificatePath = c.PemFileName conf.FakeCertificatePath = c.PemFileName
@ -132,7 +134,7 @@ func main() {
if conf.EnableMetrics { if conf.EnableMetrics {
mc, err = metric.NewCollector(conf.ListenPorts.Status, reg) mc, err = metric.NewCollector(conf.ListenPorts.Status, reg)
if err != nil { if err != nil {
glog.Fatalf("Error creating prometheus collector: %v", err) klog.Fatalf("Error creating prometheus collector: %v", err)
} }
} }
mc.Start() mc.Start()
@ -163,18 +165,18 @@ func handleSigterm(ngx *controller.NGINXController, exit exiter) {
signalChan := make(chan os.Signal, 1) signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM) signal.Notify(signalChan, syscall.SIGTERM)
<-signalChan <-signalChan
glog.Info("Received SIGTERM, shutting down") klog.Info("Received SIGTERM, shutting down")
exitCode := 0 exitCode := 0
if err := ngx.Stop(); err != nil { if err := ngx.Stop(); err != nil {
glog.Infof("Error during shutdown: %v", err) klog.Infof("Error during shutdown: %v", err)
exitCode = 1 exitCode = 1
} }
glog.Info("Handled quit, awaiting Pod deletion") klog.Info("Handled quit, awaiting Pod deletion")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
glog.Infof("Exiting with %v", exitCode) klog.Infof("Exiting with %v", exitCode)
exit(exitCode) exit(exitCode)
} }
@ -196,7 +198,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client
cfg.Burst = defaultBurst cfg.Burst = defaultBurst
cfg.ContentType = "application/vnd.kubernetes.protobuf" cfg.ContentType = "application/vnd.kubernetes.protobuf"
glog.Infof("Creating API client for %s", cfg.Host) klog.Infof("Creating API client for %s", cfg.Host)
client, err := kubernetes.NewForConfig(cfg) client, err := kubernetes.NewForConfig(cfg)
if err != nil { if err != nil {
@ -216,7 +218,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client
var lastErr error var lastErr error
retries := 0 retries := 0
glog.V(2).Info("Trying to discover Kubernetes version") klog.V(2).Info("Trying to discover Kubernetes version")
err = wait.ExponentialBackoff(defaultRetry, func() (bool, error) { err = wait.ExponentialBackoff(defaultRetry, func() (bool, error) {
v, err = client.Discovery().ServerVersion() v, err = client.Discovery().ServerVersion()
@ -225,7 +227,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client
} }
lastErr = err lastErr = err
glog.V(2).Infof("Unexpected error discovering Kubernetes version (attempt %v): %v", retries, err) klog.V(2).Infof("Unexpected error discovering Kubernetes version (attempt %v): %v", retries, err)
retries++ retries++
return false, nil return false, nil
}) })
@ -237,10 +239,10 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client
// this should not happen, warn the user // this should not happen, warn the user
if retries > 0 { if retries > 0 {
glog.Warningf("Initial connection to the Kubernetes API server was retried %d times.", retries) klog.Warningf("Initial connection to the Kubernetes API server was retried %d times.", retries)
} }
glog.Infof("Running in Kubernetes cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", klog.Infof("Running in Kubernetes cluster version v%v.%v (%v) - git (%v) commit %v - platform %v",
v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform) v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform)
return client, nil return client, nil
@ -248,7 +250,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client
// Handler for fatal init errors. Prints a verbose error message and exits. // Handler for fatal init errors. Prints a verbose error message and exits.
func handleFatalInitError(err error) { func handleFatalInitError(err error) {
glog.Fatalf("Error while initiating a connection to the Kubernetes API server. "+ klog.Fatalf("Error while initiating a connection to the Kubernetes API server. "+
"This could mean the cluster is misconfigured (e.g. it has invalid API server certificates "+ "This could mean the cluster is misconfigured (e.g. it has invalid API server certificates "+
"or Service Accounts configuration). Reason: %s\n"+ "or Service Accounts configuration). Reason: %s\n"+
"Refer to the troubleshooting guide for more information: "+ "Refer to the troubleshooting guide for more information: "+
@ -266,7 +268,7 @@ func registerHandlers(mux *http.ServeMux) {
mux.HandleFunc("/stop", func(w http.ResponseWriter, r *http.Request) { mux.HandleFunc("/stop", func(w http.ResponseWriter, r *http.Request) {
err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM) err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
if err != nil { if err != nil {
glog.Errorf("Unexpected error: %v", err) klog.Errorf("Unexpected error: %v", err)
} }
}) })
} }
@ -312,5 +314,5 @@ func startHTTPServer(port int, mux *http.ServeMux) {
WriteTimeout: 300 * time.Second, WriteTimeout: 300 * time.Second,
IdleTimeout: 120 * time.Second, IdleTimeout: 120 * time.Second,
} }
glog.Fatal(server.ListenAndServe()) klog.Fatal(server.ListenAndServe())
} }

View file

@ -18,17 +18,18 @@ package main
import ( import (
"fmt" "fmt"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress/controller"
"os" "os"
"syscall" "syscall"
"testing" "testing"
"time" "time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress/controller"
) )
func TestCreateApiserverClient(t *testing.T) { func TestCreateApiserverClient(t *testing.T) {

View file

@ -20,13 +20,13 @@ import (
"os" "os"
"os/exec" "os/exec"
"github.com/golang/glog" "k8s.io/klog"
) )
func nginxVersion() { func nginxVersion() {
flag := "-v" flag := "-v"
if glog.V(2) { if klog.V(2) {
flag = "-V" flag = "-V"
} }

View file

@ -19,8 +19,8 @@ package file
import ( import (
"crypto/sha1" "crypto/sha1"
"encoding/hex" "encoding/hex"
"github.com/golang/glog"
"io/ioutil" "io/ioutil"
"k8s.io/klog"
) )
// SHA1 returns the SHA1 of a file. // SHA1 returns the SHA1 of a file.
@ -28,7 +28,7 @@ func SHA1(filename string) string {
hasher := sha1.New() hasher := sha1.New()
s, err := ioutil.ReadFile(filename) s, err := ioutil.ReadFile(filename)
if err != nil { if err != nil {
glog.Errorf("Error reading file %v", err) klog.Errorf("Error reading file %v", err)
return "" return ""
} }

View file

@ -17,11 +17,11 @@ limitations under the License.
package annotations package annotations
import ( import (
"github.com/golang/glog"
"github.com/imdario/mergo" "github.com/imdario/mergo"
"k8s.io/ingress-nginx/internal/ingress/annotations/canary" "k8s.io/ingress-nginx/internal/ingress/annotations/canary"
"k8s.io/ingress-nginx/internal/ingress/annotations/modsecurity" "k8s.io/ingress-nginx/internal/ingress/annotations/modsecurity"
"k8s.io/ingress-nginx/internal/ingress/annotations/sslcipher" "k8s.io/ingress-nginx/internal/ingress/annotations/sslcipher"
"k8s.io/klog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
@ -156,7 +156,7 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress {
data := make(map[string]interface{}) data := make(map[string]interface{})
for name, annotationParser := range e.annotations { for name, annotationParser := range e.annotations {
val, err := annotationParser.Parse(ing) val, err := annotationParser.Parse(ing)
glog.V(5).Infof("annotation %v in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), val) klog.V(5).Infof("annotation %v in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), val)
if err != nil { if err != nil {
if errors.IsMissingAnnotations(err) { if errors.IsMissingAnnotations(err) {
continue continue
@ -177,11 +177,11 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress {
_, alreadyDenied := data[DeniedKeyName] _, alreadyDenied := data[DeniedKeyName]
if !alreadyDenied { if !alreadyDenied {
data[DeniedKeyName] = err data[DeniedKeyName] = err
glog.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) klog.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err)
continue continue
} }
glog.V(5).Infof("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) klog.V(5).Infof("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err)
} }
if val != nil { if val != nil {
@ -191,7 +191,7 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress {
err := mergo.MapWithOverwrite(pia, data) err := mergo.MapWithOverwrite(pia, data)
if err != nil { if err != nil {
glog.Errorf("unexpected error merging extracted annotations: %v", err) klog.Errorf("unexpected error merging extracted annotations: %v", err)
} }
return pia return pia

View file

@ -21,7 +21,7 @@ import (
"regexp" "regexp"
"strings" "strings"
"github.com/golang/glog" "k8s.io/klog"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
@ -146,12 +146,12 @@ func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) {
// Optional Parameters // Optional Parameters
signIn, err := parser.GetStringAnnotation("auth-signin", ing) signIn, err := parser.GetStringAnnotation("auth-signin", ing)
if err != nil { if err != nil {
glog.Warning("auth-signin annotation is undefined and will not be set") klog.Warning("auth-signin annotation is undefined and will not be set")
} }
authSnippet, err := parser.GetStringAnnotation("auth-snippet", ing) authSnippet, err := parser.GetStringAnnotation("auth-snippet", ing)
if err != nil { if err != nil {
glog.Warning("auth-snippet annotation is undefined and will not be set") klog.Warning("auth-snippet annotation is undefined and will not be set")
} }
responseHeaders := []string{} responseHeaders := []string{}

View file

@ -20,8 +20,8 @@ import (
"regexp" "regexp"
"strings" "strings"
"github.com/golang/glog"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/klog"
"k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/annotations/parser"
"k8s.io/ingress-nginx/internal/ingress/resolver" "k8s.io/ingress-nginx/internal/ingress/resolver"
@ -57,7 +57,7 @@ func (a backendProtocol) Parse(ing *extensions.Ingress) (interface{}, error) {
proto = strings.TrimSpace(strings.ToUpper(proto)) proto = strings.TrimSpace(strings.ToUpper(proto))
if !validProtocols.MatchString(proto) { if !validProtocols.MatchString(proto) {
glog.Warningf("Protocol %v is not a valid value for the backend-protocol annotation. Using HTTP as protocol", proto) klog.Warningf("Protocol %v is not a valid value for the backend-protocol annotation. Using HTTP as protocol", proto)
return HTTP, nil return HTTP, nil
} }

View file

@ -17,8 +17,8 @@ limitations under the License.
package class package class
import ( import (
"github.com/golang/glog"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/klog"
) )
const ( const (
@ -44,7 +44,7 @@ var (
func IsValid(ing *extensions.Ingress) bool { func IsValid(ing *extensions.Ingress) bool {
ingress, ok := ing.GetAnnotations()[IngressKey] ingress, ok := ing.GetAnnotations()[IngressKey]
if !ok { if !ok {
glog.V(3).Infof("annotation %v is not present in ingress %v/%v", IngressKey, ing.Namespace, ing.Name) klog.V(3).Infof("annotation %v is not present in ingress %v/%v", IngressKey, ing.Namespace, ing.Name)
} }
// we have 2 valid combinations // we have 2 valid combinations

View file

@ -19,9 +19,8 @@ package sessionaffinity
import ( import (
"regexp" "regexp"
"github.com/golang/glog"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/klog"
"k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/annotations/parser"
"k8s.io/ingress-nginx/internal/ingress/resolver" "k8s.io/ingress-nginx/internal/ingress/resolver"
@ -87,31 +86,31 @@ func (a affinity) cookieAffinityParse(ing *extensions.Ingress) *Cookie {
cookie.Name, err = parser.GetStringAnnotation(annotationAffinityCookieName, ing) cookie.Name, err = parser.GetStringAnnotation(annotationAffinityCookieName, ing)
if err != nil { if err != nil {
glog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName) klog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName)
cookie.Name = defaultAffinityCookieName cookie.Name = defaultAffinityCookieName
} }
cookie.Hash, err = parser.GetStringAnnotation(annotationAffinityCookieHash, ing) cookie.Hash, err = parser.GetStringAnnotation(annotationAffinityCookieHash, ing)
if err != nil || !affinityCookieHashRegex.MatchString(cookie.Hash) { if err != nil || !affinityCookieHashRegex.MatchString(cookie.Hash) {
glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Setting it to default %v", ing.Name, annotationAffinityCookieHash, defaultAffinityCookieHash) klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Setting it to default %v", ing.Name, annotationAffinityCookieHash, defaultAffinityCookieHash)
cookie.Hash = defaultAffinityCookieHash cookie.Hash = defaultAffinityCookieHash
} }
cookie.Expires, err = parser.GetStringAnnotation(annotationAffinityCookieExpires, ing) cookie.Expires, err = parser.GetStringAnnotation(annotationAffinityCookieExpires, ing)
if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.Expires) { if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.Expires) {
glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieExpires) klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieExpires)
cookie.Expires = "" cookie.Expires = ""
} }
cookie.MaxAge, err = parser.GetStringAnnotation(annotationAffinityCookieMaxAge, ing) cookie.MaxAge, err = parser.GetStringAnnotation(annotationAffinityCookieMaxAge, ing)
if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.MaxAge) { if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.MaxAge) {
glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge) klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge)
cookie.MaxAge = "" cookie.MaxAge = ""
} }
cookie.Path, err = parser.GetStringAnnotation(annotationAffinityCookiePath, ing) cookie.Path, err = parser.GetStringAnnotation(annotationAffinityCookiePath, ing)
if err != nil { if err != nil {
glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge) klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge)
} }
return cookie return cookie
@ -140,7 +139,7 @@ func (a affinity) Parse(ing *extensions.Ingress) (interface{}, error) {
case "cookie": case "cookie":
cookie = a.cookieAffinityParse(ing) cookie = a.cookieAffinityParse(ing)
default: default:
glog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name) klog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name)
} }

View file

@ -59,7 +59,7 @@ func (n *NGINXController) Check(_ *http.Request) error {
} }
// check the nginx master process is running // check the nginx master process is running
fs, err := proc.NewFS("/proc") fs, err := proc.NewFS("/proc", false)
if err != nil { if err != nil {
return errors.Wrap(err, "unexpected error reading /proc directory") return errors.Wrap(err, "unexpected error reading /proc directory")
} }

View file

@ -21,7 +21,7 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/golang/glog" "k8s.io/klog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
@ -675,7 +675,7 @@ func NewDefault() Configuration {
NoAuthLocations: "/.well-known/acme-challenge", NoAuthLocations: "/.well-known/acme-challenge",
} }
if glog.V(5) { if klog.V(5) {
cfg.ErrorLogLevel = "debug" cfg.ErrorLogLevel = "debug"
} }

View file

@ -24,8 +24,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/golang/glog"
"github.com/mitchellh/hashstructure" "github.com/mitchellh/hashstructure"
"k8s.io/klog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
@ -143,7 +143,7 @@ func (n *NGINXController) syncIngress(interface{}) error {
for _, loc := range server.Locations { for _, loc := range server.Locations {
if loc.Path != rootLocation { if loc.Path != rootLocation {
glog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname) klog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname)
continue continue
} }
passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{ passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{
@ -166,12 +166,12 @@ func (n *NGINXController) syncIngress(interface{}) error {
} }
if n.runningConfig.Equal(pcfg) { if n.runningConfig.Equal(pcfg) {
glog.V(3).Infof("No configuration change detected, skipping backend reload.") klog.V(3).Infof("No configuration change detected, skipping backend reload.")
return nil return nil
} }
if !n.IsDynamicConfigurationEnough(pcfg) { if !n.IsDynamicConfigurationEnough(pcfg) {
glog.Infof("Configuration changes detected, backend reload required.") klog.Infof("Configuration changes detected, backend reload required.")
hash, _ := hashstructure.Hash(pcfg, &hashstructure.HashOptions{ hash, _ := hashstructure.Hash(pcfg, &hashstructure.HashOptions{
TagName: "json", TagName: "json",
@ -183,13 +183,13 @@ func (n *NGINXController) syncIngress(interface{}) error {
if err != nil { if err != nil {
n.metricCollector.IncReloadErrorCount() n.metricCollector.IncReloadErrorCount()
n.metricCollector.ConfigSuccess(hash, false) n.metricCollector.ConfigSuccess(hash, false)
glog.Errorf("Unexpected failure reloading the backend:\n%v", err) klog.Errorf("Unexpected failure reloading the backend:\n%v", err)
return err return err
} }
n.metricCollector.SetHosts(hosts) n.metricCollector.SetHosts(hosts)
glog.Infof("Backend successfully reloaded.") klog.Infof("Backend successfully reloaded.")
n.metricCollector.ConfigSuccess(hash, true) n.metricCollector.ConfigSuccess(hash, true)
n.metricCollector.IncReloadCount() n.metricCollector.IncReloadCount()
n.metricCollector.SetSSLExpireTime(servers) n.metricCollector.SetSSLExpireTime(servers)
@ -201,7 +201,7 @@ func (n *NGINXController) syncIngress(interface{}) error {
// start listening on the configured port (default 18080) // start listening on the configured port (default 18080)
// For large configurations it might take a while so we loop // For large configurations it might take a while so we loop
// and back off // and back off
glog.Info("Initial sync, sleeping for 1 second.") klog.Info("Initial sync, sleeping for 1 second.")
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
} }
@ -215,15 +215,15 @@ func (n *NGINXController) syncIngress(interface{}) error {
err := wait.ExponentialBackoff(retry, func() (bool, error) { err := wait.ExponentialBackoff(retry, func() (bool, error) {
err := configureDynamically(pcfg, n.cfg.ListenPorts.Status, n.cfg.DynamicCertificatesEnabled) err := configureDynamically(pcfg, n.cfg.ListenPorts.Status, n.cfg.DynamicCertificatesEnabled)
if err == nil { if err == nil {
glog.V(2).Infof("Dynamic reconfiguration succeeded.") klog.V(2).Infof("Dynamic reconfiguration succeeded.")
return true, nil return true, nil
} }
glog.Warningf("Dynamic reconfiguration failed: %v", err) klog.Warningf("Dynamic reconfiguration failed: %v", err)
return false, err return false, err
}) })
if err != nil { if err != nil {
glog.Errorf("Unexpected failure reconfiguring NGINX:\n%v", err) klog.Errorf("Unexpected failure reconfiguring NGINX:\n%v", err)
return err return err
} }
@ -240,15 +240,15 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
if configmapName == "" { if configmapName == "" {
return []ingress.L4Service{} return []ingress.L4Service{}
} }
glog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName) klog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName)
_, _, err := k8s.ParseNameNS(configmapName) _, _, err := k8s.ParseNameNS(configmapName)
if err != nil { if err != nil {
glog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err) klog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err)
return []ingress.L4Service{} return []ingress.L4Service{}
} }
configmap, err := n.store.GetConfigMap(configmapName) configmap, err := n.store.GetConfigMap(configmapName)
if err != nil { if err != nil {
glog.Errorf("Error getting ConfigMap %q: %v", configmapName, err) klog.Errorf("Error getting ConfigMap %q: %v", configmapName, err)
return []ingress.L4Service{} return []ingress.L4Service{}
} }
var svcs []ingress.L4Service var svcs []ingress.L4Service
@ -266,16 +266,16 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
for port, svcRef := range configmap.Data { for port, svcRef := range configmap.Data {
externalPort, err := strconv.Atoi(port) externalPort, err := strconv.Atoi(port)
if err != nil { if err != nil {
glog.Warningf("%q is not a valid %v port number", port, proto) klog.Warningf("%q is not a valid %v port number", port, proto)
continue continue
} }
if reserverdPorts.Has(externalPort) { if reserverdPorts.Has(externalPort) {
glog.Warningf("Port %d cannot be used for %v stream services. It is reserved for the Ingress controller.", externalPort, proto) klog.Warningf("Port %d cannot be used for %v stream services. It is reserved for the Ingress controller.", externalPort, proto)
continue continue
} }
nsSvcPort := strings.Split(svcRef, ":") nsSvcPort := strings.Split(svcRef, ":")
if len(nsSvcPort) < 2 { if len(nsSvcPort) < 2 {
glog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort) klog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort)
continue continue
} }
nsName := nsSvcPort[0] nsName := nsSvcPort[0]
@ -293,19 +293,19 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
} }
svcNs, svcName, err := k8s.ParseNameNS(nsName) svcNs, svcName, err := k8s.ParseNameNS(nsName)
if err != nil { if err != nil {
glog.Warningf("%v", err) klog.Warningf("%v", err)
continue continue
} }
svc, err := n.store.GetService(nsName) svc, err := n.store.GetService(nsName)
if err != nil { if err != nil {
glog.Warningf("Error getting Service %q: %v", nsName, err) klog.Warningf("Error getting Service %q: %v", nsName, err)
continue continue
} }
var endps []ingress.Endpoint var endps []ingress.Endpoint
targetPort, err := strconv.Atoi(svcPort) targetPort, err := strconv.Atoi(svcPort)
if err != nil { if err != nil {
// not a port number, fall back to using port name // not a port number, fall back to using port name
glog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName) klog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName)
for _, sp := range svc.Spec.Ports { for _, sp := range svc.Spec.Ports {
if sp.Name == svcPort { if sp.Name == svcPort {
if sp.Protocol == proto { if sp.Protocol == proto {
@ -315,7 +315,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
} }
} }
} else { } else {
glog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName) klog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName)
for _, sp := range svc.Spec.Ports { for _, sp := range svc.Spec.Ports {
if sp.Port == int32(targetPort) { if sp.Port == int32(targetPort) {
if sp.Protocol == proto { if sp.Protocol == proto {
@ -328,7 +328,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr
// stream services cannot contain empty upstreams and there is // stream services cannot contain empty upstreams and there is
// no default backend equivalent // no default backend equivalent
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort) klog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort)
continue continue
} }
svcs = append(svcs, ingress.L4Service{ svcs = append(svcs, ingress.L4Service{
@ -365,14 +365,14 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend {
svc, err := n.store.GetService(svcKey) svc, err := n.store.GetService(svcKey)
if err != nil { if err != nil {
glog.Warningf("Error getting default backend %q: %v", svcKey, err) klog.Warningf("Error getting default backend %q: %v", svcKey, err)
upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint()) upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint())
return upstream return upstream
} }
endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, n.store.GetServiceEndpoints) endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, n.store.GetServiceEndpoints)
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("Service %q does not have any active Endpoint", svcKey) klog.Warningf("Service %q does not have any active Endpoint", svcKey)
endps = []ingress.Endpoint{n.DefaultEndpoint()} endps = []ingress.Endpoint{n.DefaultEndpoint()}
} }
@ -406,7 +406,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
if rule.HTTP == nil && if rule.HTTP == nil &&
host != defServerName { host != defServerName {
glog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey)
continue continue
} }
@ -417,16 +417,16 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
if server.CertificateAuth.CAFileName == "" { if server.CertificateAuth.CAFileName == "" {
server.CertificateAuth = anns.CertificateAuth server.CertificateAuth = anns.CertificateAuth
if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" { if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" {
glog.V(3).Infof("Secret %q has no 'ca.crt' key, mutual authentication disabled for Ingress %q", klog.V(3).Infof("Secret %q has no 'ca.crt' key, mutual authentication disabled for Ingress %q",
server.CertificateAuth.Secret, ingKey) server.CertificateAuth.Secret, ingKey)
} }
} else { } else {
glog.V(3).Infof("Server %q is already configured for mutual authentication (Ingress %q)", klog.V(3).Infof("Server %q is already configured for mutual authentication (Ingress %q)",
server.Hostname, ingKey) server.Hostname, ingKey)
} }
if rule.HTTP == nil { if rule.HTTP == nil {
glog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey)
continue continue
} }
@ -451,12 +451,12 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
addLoc = false addLoc = false
if !loc.IsDefBackend { if !loc.IsDefBackend {
glog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress %q)", klog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress %q)",
loc.Path, server.Hostname, loc.Backend, ingKey) loc.Path, server.Hostname, loc.Backend, ingKey)
break break
} }
glog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress %q)", klog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress %q)",
loc.Path, server.Hostname, loc.Backend, ups.Name, ingKey) loc.Path, server.Hostname, loc.Backend, ups.Name, ingKey)
loc.Backend = ups.Name loc.Backend = ups.Name
@ -496,7 +496,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
// new location // new location
if addLoc { if addLoc {
glog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress %q)", klog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress %q)",
nginxPath, server.Hostname, ups.Name, ingKey) nginxPath, server.Hostname, ups.Name, ingKey)
loc := &ingress.Location{ loc := &ingress.Location{
@ -543,7 +543,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
if anns.SessionAffinity.Type == "cookie" { if anns.SessionAffinity.Type == "cookie" {
cookiePath := anns.SessionAffinity.Cookie.Path cookiePath := anns.SessionAffinity.Cookie.Path
if anns.Rewrite.UseRegex && cookiePath == "" { if anns.Rewrite.UseRegex && cookiePath == "" {
glog.Warningf("session-cookie-path should be set when use-regex is true") klog.Warningf("session-cookie-path should be set when use-regex is true")
} }
ups.SessionAffinity.CookieSessionAffinity.Name = anns.SessionAffinity.Cookie.Name ups.SessionAffinity.CookieSessionAffinity.Name = anns.SessionAffinity.Cookie.Name
@ -562,7 +562,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
} }
if anns.Canary.Enabled { if anns.Canary.Enabled {
glog.Infof("Canary ingress %v detected. Finding eligible backends to merge into.", ing.Name) klog.Infof("Canary ingress %v detected. Finding eligible backends to merge into.", ing.Name)
mergeAlternativeBackends(ing, upstreams, servers) mergeAlternativeBackends(ing, upstreams, servers)
} }
} }
@ -577,13 +577,13 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
for _, location := range server.Locations { for _, location := range server.Locations {
if upstream.Name == location.Backend { if upstream.Name == location.Backend {
if len(upstream.Endpoints) == 0 { if len(upstream.Endpoints) == 0 {
glog.V(3).Infof("Upstream %q has no active Endpoint", upstream.Name) klog.V(3).Infof("Upstream %q has no active Endpoint", upstream.Name)
// check if the location contains endpoints and a custom default backend // check if the location contains endpoints and a custom default backend
if location.DefaultBackend != nil { if location.DefaultBackend != nil {
sp := location.DefaultBackend.Spec.Ports[0] sp := location.DefaultBackend.Spec.Ports[0]
endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, n.store.GetServiceEndpoints)
if len(endps) > 0 { if len(endps) > 0 {
glog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")", klog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")",
location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name) location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name)
nb := upstream.DeepCopy() nb := upstream.DeepCopy()
@ -599,7 +599,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in
if server.SSLPassthrough { if server.SSLPassthrough {
if location.Path == rootLocation { if location.Path == rootLocation {
if location.Backend == defUpstreamName { if location.Backend == defUpstreamName {
glog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname) klog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname)
continue continue
} }
isHTTPSfrom = append(isHTTPSfrom, server) isHTTPSfrom = append(isHTTPSfrom, server)
@ -650,7 +650,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B
if ing.Spec.Backend != nil { if ing.Spec.Backend != nil {
defBackend = upstreamName(ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort) defBackend = upstreamName(ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort)
glog.V(3).Infof("Creating upstream %q", defBackend) klog.V(3).Infof("Creating upstream %q", defBackend)
upstreams[defBackend] = newUpstream(defBackend) upstreams[defBackend] = newUpstream(defBackend)
if upstreams[defBackend].SecureCACert.Secret == "" { if upstreams[defBackend].SecureCACert.Secret == "" {
upstreams[defBackend].SecureCACert = anns.SecureUpstream.CACert upstreams[defBackend].SecureCACert = anns.SecureUpstream.CACert
@ -668,7 +668,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B
if anns.ServiceUpstream { if anns.ServiceUpstream {
endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend) endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend)
if err != nil { if err != nil {
glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
} else { } else {
upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint} upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint}
} }
@ -688,13 +688,13 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B
endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String()) endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String())
upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...) upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...)
if err != nil { if err != nil {
glog.Warningf("Error creating upstream %q: %v", defBackend, err) klog.Warningf("Error creating upstream %q: %v", defBackend, err)
} }
} }
s, err := n.store.GetService(svcKey) s, err := n.store.GetService(svcKey)
if err != nil { if err != nil {
glog.Warningf("Error obtaining Service %q: %v", svcKey, err) klog.Warningf("Error obtaining Service %q: %v", svcKey, err)
} }
upstreams[defBackend].Service = s upstreams[defBackend].Service = s
} }
@ -711,7 +711,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B
continue continue
} }
glog.V(3).Infof("Creating upstream %q", name) klog.V(3).Infof("Creating upstream %q", name)
upstreams[name] = newUpstream(name) upstreams[name] = newUpstream(name)
upstreams[name].Port = path.Backend.ServicePort upstreams[name].Port = path.Backend.ServicePort
@ -733,7 +733,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B
if anns.ServiceUpstream { if anns.ServiceUpstream {
endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend) endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend)
if err != nil { if err != nil {
glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err)
} else { } else {
upstreams[name].Endpoints = []ingress.Endpoint{endpoint} upstreams[name].Endpoints = []ingress.Endpoint{endpoint}
} }
@ -752,7 +752,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B
if len(upstreams[name].Endpoints) == 0 { if len(upstreams[name].Endpoints) == 0 {
endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String()) endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String())
if err != nil { if err != nil {
glog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err) klog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err)
continue continue
} }
upstreams[name].Endpoints = endp upstreams[name].Endpoints = endp
@ -760,7 +760,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B
s, err := n.store.GetService(svcKey) s, err := n.store.GetService(svcKey)
if err != nil { if err != nil {
glog.Warningf("Error obtaining Service %q: %v", svcKey, err) klog.Warningf("Error obtaining Service %q: %v", svcKey, err)
continue continue
} }
@ -816,7 +816,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres
return upstreams, err return upstreams, err
} }
glog.V(3).Infof("Obtaining ports information for Service %q", svcKey) klog.V(3).Infof("Obtaining ports information for Service %q", svcKey)
for _, servicePort := range svc.Spec.Ports { for _, servicePort := range svc.Spec.Ports {
// targetPort could be a string, use either the port name or number (int) // targetPort could be a string, use either the port name or number (int)
if strconv.Itoa(int(servicePort.Port)) == backendPort || if strconv.Itoa(int(servicePort.Port)) == backendPort ||
@ -825,7 +825,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres
endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints)
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("Service %q does not have any active Endpoint.", svcKey) klog.Warningf("Service %q does not have any active Endpoint.", svcKey)
} }
if n.cfg.SortBackends { if n.cfg.SortBackends {
@ -848,7 +848,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres
if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName { if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName {
externalPort, err := strconv.Atoi(backendPort) externalPort, err := strconv.Atoi(backendPort)
if err != nil { if err != nil {
glog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort) klog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort)
return upstreams, nil return upstreams, nil
} }
@ -859,7 +859,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres
} }
endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints)
if len(endps) == 0 { if len(endps) == 0 {
glog.Warningf("Service %q does not have any active Endpoint.", svcKey) klog.Warningf("Service %q does not have any active Endpoint.", svcKey)
return upstreams, nil return upstreams, nil
} }
@ -950,7 +950,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
// special "catch all" case, Ingress with a backend but no rule // special "catch all" case, Ingress with a backend but no rule
defLoc := servers[defServerName].Locations[0] defLoc := servers[defServerName].Locations[0]
if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 { if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 {
glog.Infof("Ingress %q defines a backend but no rule. Using it to configure the catch-all server %q", klog.Infof("Ingress %q defines a backend but no rule. Using it to configure the catch-all server %q",
ingKey, defServerName) ingKey, defServerName)
defLoc.IsDefBackend = false defLoc.IsDefBackend = false
@ -978,7 +978,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
defLoc.BackendProtocol = anns.BackendProtocol defLoc.BackendProtocol = anns.BackendProtocol
defLoc.ModSecurity = anns.ModSecurity defLoc.ModSecurity = anns.ModSecurity
} else { } else {
glog.V(3).Infof("Ingress %q defines both a backend and rules. Using its backend as default upstream for all its rules.", klog.V(3).Infof("Ingress %q defines both a backend and rules. Using its backend as default upstream for all its rules.",
ingKey) ingKey)
} }
} }
@ -1029,7 +1029,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
aliases["Alias"] = host aliases["Alias"] = host
} }
} else { } else {
glog.Warningf("Aliases already configured for server %q, skipping (Ingress %q)", klog.Warningf("Aliases already configured for server %q, skipping (Ingress %q)",
host, ingKey) host, ingKey)
} }
} }
@ -1038,7 +1038,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
if servers[host].ServerSnippet == "" { if servers[host].ServerSnippet == "" {
servers[host].ServerSnippet = anns.ServerSnippet servers[host].ServerSnippet = anns.ServerSnippet
} else { } else {
glog.Warningf("Server snippet already configured for server %q, skipping (Ingress %q)", klog.Warningf("Server snippet already configured for server %q, skipping (Ingress %q)",
host, ingKey) host, ingKey)
} }
} }
@ -1054,14 +1054,14 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
} }
if len(ing.Spec.TLS) == 0 { if len(ing.Spec.TLS) == 0 {
glog.V(3).Infof("Ingress %q does not contains a TLS section.", ingKey) klog.V(3).Infof("Ingress %q does not contains a TLS section.", ingKey)
continue continue
} }
tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert) tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert)
if tlsSecretName == "" { if tlsSecretName == "" {
glog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host) klog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host)
servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemFileName = defaultPemFileName
servers[host].SSLCert.PemSHA = defaultPemSHA servers[host].SSLCert.PemSHA = defaultPemSHA
continue continue
@ -1070,7 +1070,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
secrKey := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName) secrKey := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName)
cert, err := n.store.GetLocalSSLCert(secrKey) cert, err := n.store.GetLocalSSLCert(secrKey)
if err != nil { if err != nil {
glog.Warningf("Error getting SSL certificate %q: %v. Using default certificate", secrKey, err) klog.Warningf("Error getting SSL certificate %q: %v. Using default certificate", secrKey, err)
servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemFileName = defaultPemFileName
servers[host].SSLCert.PemSHA = defaultPemSHA servers[host].SSLCert.PemSHA = defaultPemSHA
continue continue
@ -1078,15 +1078,15 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
err = cert.Certificate.VerifyHostname(host) err = cert.Certificate.VerifyHostname(host)
if err != nil { if err != nil {
glog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", secrKey, host, err) klog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", secrKey, host, err)
glog.Warning("Validating certificate against DNS names. This will be deprecated in a future version.") klog.Warning("Validating certificate against DNS names. This will be deprecated in a future version.")
// check the Common Name field // check the Common Name field
// https://github.com/golang/go/issues/22922 // https://github.com/golang/go/issues/22922
err := verifyHostname(host, cert.Certificate) err := verifyHostname(host, cert.Certificate)
if err != nil { if err != nil {
glog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", klog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v",
secrKey, host, err) secrKey, host, err)
glog.Warningf("Using default certificate") klog.Warningf("Using default certificate")
servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemFileName = defaultPemFileName
servers[host].SSLCert.PemSHA = defaultPemSHA servers[host].SSLCert.PemSHA = defaultPemSHA
continue continue
@ -1102,14 +1102,14 @@ func (n *NGINXController) createServers(data []*ingress.Ingress,
servers[host].SSLCert = *cert servers[host].SSLCert = *cert
if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) { if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) {
glog.Warningf("SSL certificate for server %q is about to expire (%v)", host, cert.ExpireTime) klog.Warningf("SSL certificate for server %q is about to expire (%v)", host, cert.ExpireTime)
} }
} }
} }
for alias, host := range aliases { for alias, host := range aliases {
if _, ok := servers[alias]; ok { if _, ok := servers[alias]; ok {
glog.Warningf("Conflicting hostname (%v) and alias (%v). Removing alias to avoid conflicts.", host, alias) klog.Warningf("Conflicting hostname (%v) and alias (%v). Removing alias to avoid conflicts.", host, alias)
servers[host].Alias = "" servers[host].Alias = ""
} }
} }
@ -1124,7 +1124,7 @@ func canMergeBackend(primary *ingress.Backend, alternative *ingress.Backend) boo
// Performs the merge action and checks to ensure that one two alternative backends do not merge into each other // Performs the merge action and checks to ensure that one two alternative backends do not merge into each other
func mergeAlternativeBackend(priUps *ingress.Backend, altUps *ingress.Backend) bool { func mergeAlternativeBackend(priUps *ingress.Backend, altUps *ingress.Backend) bool {
if priUps.NoServer { if priUps.NoServer {
glog.Warningf("unable to merge alternative backend %v into primary backend %v because %v is a primary backend", klog.Warningf("unable to merge alternative backend %v into primary backend %v because %v is a primary backend",
altUps.Name, priUps.Name, priUps.Name) altUps.Name, priUps.Name, priUps.Name)
return false return false
} }
@ -1154,7 +1154,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres
priUps := upstreams[loc.Backend] priUps := upstreams[loc.Backend]
if canMergeBackend(priUps, altUps) { if canMergeBackend(priUps, altUps) {
glog.Infof("matching backend %v found for alternative backend %v", klog.Infof("matching backend %v found for alternative backend %v",
priUps.Name, altUps.Name) priUps.Name, altUps.Name)
merged = mergeAlternativeBackend(priUps, altUps) merged = mergeAlternativeBackend(priUps, altUps)
@ -1162,7 +1162,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres
} }
if !merged { if !merged {
glog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name) klog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name)
delete(upstreams, altUps.Name) delete(upstreams, altUps.Name)
} }
} }
@ -1177,7 +1177,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres
server, ok := servers[rule.Host] server, ok := servers[rule.Host]
if !ok { if !ok {
glog.Errorf("cannot merge alternative backend %s into hostname %s that does not exist", klog.Errorf("cannot merge alternative backend %s into hostname %s that does not exist",
altUps.Name, altUps.Name,
rule.Host) rule.Host)
@ -1189,7 +1189,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres
priUps := upstreams[loc.Backend] priUps := upstreams[loc.Backend]
if canMergeBackend(priUps, altUps) && loc.Path == path.Path { if canMergeBackend(priUps, altUps) && loc.Path == path.Path {
glog.Infof("matching backend %v found for alternative backend %v", klog.Infof("matching backend %v found for alternative backend %v",
priUps.Name, altUps.Name) priUps.Name, altUps.Name)
merged = mergeAlternativeBackend(priUps, altUps) merged = mergeAlternativeBackend(priUps, altUps)
@ -1197,7 +1197,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres
} }
if !merged { if !merged {
glog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name) klog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name)
delete(upstreams, altUps.Name) delete(upstreams, altUps.Name)
} }
} }
@ -1232,7 +1232,7 @@ func extractTLSSecretName(host string, ing *ingress.Ingress,
cert, err := getLocalSSLCert(secrKey) cert, err := getLocalSSLCert(secrKey)
if err != nil { if err != nil {
glog.Warningf("Error getting SSL certificate %q: %v", secrKey, err) klog.Warningf("Error getting SSL certificate %q: %v", secrKey, err)
continue continue
} }
@ -1244,7 +1244,7 @@ func extractTLSSecretName(host string, ing *ingress.Ingress,
if err != nil { if err != nil {
continue continue
} }
glog.V(3).Infof("Found SSL certificate matching host %q: %q", host, secrKey) klog.V(3).Infof("Found SSL certificate matching host %q: %q", host, secrKey)
return tls.SecretName return tls.SecretName
} }

View file

@ -22,7 +22,7 @@ import (
"reflect" "reflect"
"strconv" "strconv"
"github.com/golang/glog" "k8s.io/klog"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
@ -48,18 +48,18 @@ func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Prot
// ExternalName services // ExternalName services
if s.Spec.Type == corev1.ServiceTypeExternalName { if s.Spec.Type == corev1.ServiceTypeExternalName {
glog.V(3).Infof("Ingress using Service %q of type ExternalName.", svcKey) klog.V(3).Infof("Ingress using Service %q of type ExternalName.", svcKey)
targetPort := port.TargetPort.IntValue() targetPort := port.TargetPort.IntValue()
if targetPort <= 0 { if targetPort <= 0 {
glog.Errorf("ExternalName Service %q has an invalid port (%v)", svcKey, targetPort) klog.Errorf("ExternalName Service %q has an invalid port (%v)", svcKey, targetPort)
return upsServers return upsServers
} }
if net.ParseIP(s.Spec.ExternalName) == nil { if net.ParseIP(s.Spec.ExternalName) == nil {
_, err := net.LookupHost(s.Spec.ExternalName) _, err := net.LookupHost(s.Spec.ExternalName)
if err != nil { if err != nil {
glog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, err) klog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, err)
return upsServers return upsServers
} }
} }
@ -70,10 +70,10 @@ func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Prot
}) })
} }
glog.V(3).Infof("Getting Endpoints for Service %q and port %v", svcKey, port.String()) klog.V(3).Infof("Getting Endpoints for Service %q and port %v", svcKey, port.String())
ep, err := getServiceEndpoints(svcKey) ep, err := getServiceEndpoints(svcKey)
if err != nil { if err != nil {
glog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err) klog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err)
return upsServers return upsServers
} }
@ -113,6 +113,6 @@ func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Prot
} }
} }
glog.V(3).Infof("Endpoints found for Service %q: %v", svcKey, upsServers) klog.V(3).Infof("Endpoints found for Service %q: %v", svcKey, upsServers)
return upsServers return upsServers
} }

View file

@ -34,8 +34,6 @@ import (
"text/template" "text/template"
"time" "time"
"github.com/golang/glog"
proxyproto "github.com/armon/go-proxyproto" proxyproto "github.com/armon/go-proxyproto"
"github.com/eapache/channels" "github.com/eapache/channels"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
@ -44,6 +42,7 @@ import (
v1core "k8s.io/client-go/kubernetes/typed/core/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/flowcontrol"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/kubernetes/pkg/util/filesystem"
"k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/file"
@ -76,14 +75,14 @@ var (
// NewNGINXController creates a new NGINX Ingress controller. // NewNGINXController creates a new NGINX Ingress controller.
func NewNGINXController(config *Configuration, mc metric.Collector, fs file.Filesystem) *NGINXController { func NewNGINXController(config *Configuration, mc metric.Collector, fs file.Filesystem) *NGINXController {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: config.Client.CoreV1().Events(config.Namespace), Interface: config.Client.CoreV1().Events(config.Namespace),
}) })
h, err := dns.GetSystemNameServers() h, err := dns.GetSystemNameServers()
if err != nil { if err != nil {
glog.Warningf("Error reading system nameservers: %v", err) klog.Warningf("Error reading system nameservers: %v", err)
} }
n := &NGINXController{ n := &NGINXController{
@ -113,7 +112,7 @@ func NewNGINXController(config *Configuration, mc metric.Collector, fs file.File
pod, err := k8s.GetPodDetails(config.Client) pod, err := k8s.GetPodDetails(config.Client)
if err != nil { if err != nil {
glog.Fatalf("unexpected error obtaining pod information: %v", err) klog.Fatalf("unexpected error obtaining pod information: %v", err)
} }
n.store = store.New( n.store = store.New(
@ -147,14 +146,14 @@ func NewNGINXController(config *Configuration, mc metric.Collector, fs file.File
UseNodeInternalIP: config.UseNodeInternalIP, UseNodeInternalIP: config.UseNodeInternalIP,
}) })
} else { } else {
glog.Warning("Update of Ingress status is disabled (flag --update-status)") klog.Warning("Update of Ingress status is disabled (flag --update-status)")
} }
onTemplateChange := func() { onTemplateChange := func() {
template, err := ngx_template.NewTemplate(tmplPath, fs) template, err := ngx_template.NewTemplate(tmplPath, fs)
if err != nil { if err != nil {
// this error is different from the rest because it must be clear why nginx is not working // this error is different from the rest because it must be clear why nginx is not working
glog.Errorf(` klog.Errorf(`
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
Error loading new template: %v Error loading new template: %v
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
@ -163,13 +162,13 @@ Error loading new template: %v
} }
n.t = template n.t = template
glog.Info("New NGINX configuration template loaded.") klog.Info("New NGINX configuration template loaded.")
n.syncQueue.EnqueueTask(task.GetDummyObject("template-change")) n.syncQueue.EnqueueTask(task.GetDummyObject("template-change"))
} }
ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs) ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs)
if err != nil { if err != nil {
glog.Fatalf("Invalid NGINX configuration template: %v", err) klog.Fatalf("Invalid NGINX configuration template: %v", err)
} }
n.t = ngxTpl n.t = ngxTpl
@ -181,7 +180,7 @@ Error loading new template: %v
_, err = watch.NewFileWatcher(tmplPath, onTemplateChange) _, err = watch.NewFileWatcher(tmplPath, onTemplateChange)
if err != nil { if err != nil {
glog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err) klog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err)
} }
filesToWatch := []string{} filesToWatch := []string{}
@ -199,16 +198,16 @@ Error loading new template: %v
}) })
if err != nil { if err != nil {
glog.Fatalf("Error creating file watchers: %v", err) klog.Fatalf("Error creating file watchers: %v", err)
} }
for _, f := range filesToWatch { for _, f := range filesToWatch {
_, err = watch.NewFileWatcher(f, func() { _, err = watch.NewFileWatcher(f, func() {
glog.Infof("File %v changed. Reloading NGINX", f) klog.Infof("File %v changed. Reloading NGINX", f)
n.syncQueue.EnqueueTask(task.GetDummyObject("file-change")) n.syncQueue.EnqueueTask(task.GetDummyObject("file-change"))
}) })
if err != nil { if err != nil {
glog.Fatalf("Error creating file watcher for %v: %v", f, err) klog.Fatalf("Error creating file watcher for %v: %v", f, err)
} }
} }
@ -262,7 +261,7 @@ type NGINXController struct {
// Start starts a new NGINX master process running in the foreground. // Start starts a new NGINX master process running in the foreground.
func (n *NGINXController) Start() { func (n *NGINXController) Start() {
glog.Info("Starting NGINX Ingress controller") klog.Info("Starting NGINX Ingress controller")
n.store.Run(n.stopCh) n.store.Run(n.stopCh)
@ -283,7 +282,7 @@ func (n *NGINXController) Start() {
n.setupSSLProxy() n.setupSSLProxy()
} }
glog.Info("Starting NGINX process") klog.Info("Starting NGINX process")
n.start(cmd) n.start(cmd)
go n.syncQueue.Run(time.Second, n.stopCh) go n.syncQueue.Run(time.Second, n.stopCh)
@ -319,7 +318,7 @@ func (n *NGINXController) Start() {
break break
} }
if evt, ok := event.(store.Event); ok { if evt, ok := event.(store.Event); ok {
glog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj) klog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj)
if evt.Type == store.ConfigurationEvent { if evt.Type == store.ConfigurationEvent {
// TODO: is this necessary? Consider removing this special case // TODO: is this necessary? Consider removing this special case
n.syncQueue.EnqueueTask(task.GetDummyObject("configmap-change")) n.syncQueue.EnqueueTask(task.GetDummyObject("configmap-change"))
@ -328,7 +327,7 @@ func (n *NGINXController) Start() {
n.syncQueue.EnqueueSkippableTask(evt.Obj) n.syncQueue.EnqueueSkippableTask(evt.Obj)
} else { } else {
glog.Warningf("Unexpected event type received %T", event) klog.Warningf("Unexpected event type received %T", event)
} }
case <-n.stopCh: case <-n.stopCh:
break break
@ -347,7 +346,7 @@ func (n *NGINXController) Stop() error {
return fmt.Errorf("shutdown already in progress") return fmt.Errorf("shutdown already in progress")
} }
glog.Info("Shutting down controller queues") klog.Info("Shutting down controller queues")
close(n.stopCh) close(n.stopCh)
go n.syncQueue.Shutdown() go n.syncQueue.Shutdown()
if n.syncStatus != nil { if n.syncStatus != nil {
@ -355,7 +354,7 @@ func (n *NGINXController) Stop() error {
} }
// send stop signal to NGINX // send stop signal to NGINX
glog.Info("Stopping NGINX process") klog.Info("Stopping NGINX process")
cmd := nginxExecCommand("-s", "quit") cmd := nginxExecCommand("-s", "quit")
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
@ -368,7 +367,7 @@ func (n *NGINXController) Stop() error {
timer := time.NewTicker(time.Second * 1) timer := time.NewTicker(time.Second * 1)
for range timer.C { for range timer.C {
if !process.IsNginxRunning() { if !process.IsNginxRunning() {
glog.Info("NGINX process has stopped") klog.Info("NGINX process has stopped")
timer.Stop() timer.Stop()
break break
} }
@ -381,7 +380,7 @@ func (n *NGINXController) start(cmd *exec.Cmd) {
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil { if err := cmd.Start(); err != nil {
glog.Fatalf("NGINX error: %v", err) klog.Fatalf("NGINX error: %v", err)
n.ngxErrCh <- err n.ngxErrCh <- err
return return
} }
@ -444,7 +443,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
for _, pb := range ingressCfg.PassthroughBackends { for _, pb := range ingressCfg.PassthroughBackends {
svc := pb.Service svc := pb.Service
if svc == nil { if svc == nil {
glog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend) klog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend)
continue continue
} }
port, err := strconv.Atoi(pb.Port.String()) port, err := strconv.Atoi(pb.Port.String())
@ -497,7 +496,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
} else { } else {
n = fmt.Sprintf("www.%v", srv.Hostname) n = fmt.Sprintf("www.%v", srv.Hostname)
} }
glog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n) klog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n)
if _, ok := redirectServers[n]; !ok { if _, ok := redirectServers[n]; !ok {
found := false found := false
for _, esrv := range ingressCfg.Servers { for _, esrv := range ingressCfg.Servers {
@ -514,24 +513,24 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
} }
if cfg.ServerNameHashBucketSize == 0 { if cfg.ServerNameHashBucketSize == 0 {
nameHashBucketSize := nginxHashBucketSize(longestName) nameHashBucketSize := nginxHashBucketSize(longestName)
glog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", nameHashBucketSize) klog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", nameHashBucketSize)
cfg.ServerNameHashBucketSize = nameHashBucketSize cfg.ServerNameHashBucketSize = nameHashBucketSize
} }
serverNameHashMaxSize := nextPowerOf2(serverNameBytes) serverNameHashMaxSize := nextPowerOf2(serverNameBytes)
if cfg.ServerNameHashMaxSize < serverNameHashMaxSize { if cfg.ServerNameHashMaxSize < serverNameHashMaxSize {
glog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize) klog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize)
cfg.ServerNameHashMaxSize = serverNameHashMaxSize cfg.ServerNameHashMaxSize = serverNameHashMaxSize
} }
// the limit of open files is per worker process // the limit of open files is per worker process
// and we leave some room to avoid consuming all the FDs available // and we leave some room to avoid consuming all the FDs available
wp, err := strconv.Atoi(cfg.WorkerProcesses) wp, err := strconv.Atoi(cfg.WorkerProcesses)
glog.V(3).Infof("Number of worker processes: %d", wp) klog.V(3).Infof("Number of worker processes: %d", wp)
if err != nil { if err != nil {
wp = 1 wp = 1
} }
maxOpenFiles := (sysctlFSFileMax() / wp) - 1024 maxOpenFiles := (sysctlFSFileMax() / wp) - 1024
glog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles) klog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles)
if maxOpenFiles < 1024 { if maxOpenFiles < 1024 {
// this means the value of RLIMIT_NOFILE is too low. // this means the value of RLIMIT_NOFILE is too low.
maxOpenFiles = 1024 maxOpenFiles = 1024
@ -541,7 +540,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.ProxySetHeaders != "" { if cfg.ProxySetHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders) cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders)
if err != nil { if err != nil {
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err) klog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err)
} }
setHeaders = cmap.Data setHeaders = cmap.Data
@ -551,7 +550,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.AddHeaders != "" { if cfg.AddHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.AddHeaders) cmap, err := n.store.GetConfigMap(cfg.AddHeaders)
if err != nil { if err != nil {
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err) klog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err)
} }
addHeaders = cmap.Data addHeaders = cmap.Data
@ -563,7 +562,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
secret, err := n.store.GetSecret(secretName) secret, err := n.store.GetSecret(secretName)
if err != nil { if err != nil {
glog.Warningf("Error reading Secret %q from local store: %v", secretName, err) klog.Warningf("Error reading Secret %q from local store: %v", secretName, err)
} }
nsSecName := strings.Replace(secretName, "/", "-", -1) nsSecName := strings.Replace(secretName, "/", "-", -1)
@ -572,7 +571,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if ok { if ok {
pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem) pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem)
if err != nil { if err != nil {
glog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err) klog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err)
} else { } else {
sslDHParam = pemFileName sslDHParam = pemFileName
} }
@ -624,7 +623,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return err return err
} }
if glog.V(2) { if klog.V(2) {
src, _ := ioutil.ReadFile(cfgPath) src, _ := ioutil.ReadFile(cfgPath)
if !bytes.Equal(src, content) { if !bytes.Equal(src, content) {
tmpfile, err := ioutil.TempFile("", "new-nginx-cfg") tmpfile, err := ioutil.TempFile("", "new-nginx-cfg")
@ -640,7 +639,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
// TODO: executing diff can return exit code != 0 // TODO: executing diff can return exit code != 0
diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput() diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput()
glog.Infof("NGINX configuration diff:\n%v", string(diffOutput)) klog.Infof("NGINX configuration diff:\n%v", string(diffOutput))
// we do not defer the deletion of temp files in order // we do not defer the deletion of temp files in order
// to keep them around for inspection in case of error // to keep them around for inspection in case of error
@ -691,7 +690,7 @@ func (n *NGINXController) setupSSLProxy() {
sslPort := n.cfg.ListenPorts.HTTPS sslPort := n.cfg.ListenPorts.HTTPS
proxyPort := n.cfg.ListenPorts.SSLProxy proxyPort := n.cfg.ListenPorts.SSLProxy
glog.Info("Starting TLS proxy for SSL Passthrough") klog.Info("Starting TLS proxy for SSL Passthrough")
n.Proxy = &TCPProxy{ n.Proxy = &TCPProxy{
Default: &TCPServer{ Default: &TCPServer{
Hostname: "localhost", Hostname: "localhost",
@ -703,7 +702,7 @@ func (n *NGINXController) setupSSLProxy() {
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", sslPort)) listener, err := net.Listen("tcp", fmt.Sprintf(":%v", sslPort))
if err != nil { if err != nil {
glog.Fatalf("%v", err) klog.Fatalf("%v", err)
} }
proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout} proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout}
@ -723,11 +722,11 @@ func (n *NGINXController) setupSSLProxy() {
} }
if err != nil { if err != nil {
glog.Warningf("Error accepting TCP connection: %v", err) klog.Warningf("Error accepting TCP connection: %v", err)
continue continue
} }
glog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) klog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
go n.Proxy.Handle(conn) go n.Proxy.Handle(conn)
} }
}() }()
@ -884,7 +883,7 @@ func post(url string, data interface{}) error {
return err return err
} }
glog.V(2).Infof("Posting to %s", url) klog.V(2).Infof("Posting to %s", url)
resp, err := http.Post(url, "application/json", bytes.NewReader(buf)) resp, err := http.Post(url, "application/json", bytes.NewReader(buf))
if err != nil { if err != nil {
return err return err
@ -892,7 +891,7 @@ func post(url string, data interface{}) error {
defer func() { defer func() {
if err := resp.Body.Close(); err != nil { if err := resp.Body.Close(); err != nil {
glog.Warningf("Error while closing response body:\n%v", err) klog.Warningf("Error while closing response body:\n%v", err)
} }
}() }()

View file

@ -24,9 +24,9 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/golang/glog"
ps "github.com/mitchellh/go-ps" ps "github.com/mitchellh/go-ps"
"github.com/ncabatoff/process-exporter/proc" "github.com/ncabatoff/process-exporter/proc"
"k8s.io/klog"
) )
// IsRespawnIfRequired checks if error type is exec.ExitError or not // IsRespawnIfRequired checks if error type is exec.ExitError or not
@ -37,7 +37,7 @@ func IsRespawnIfRequired(err error) bool {
} }
waitStatus := exitError.Sys().(syscall.WaitStatus) waitStatus := exitError.Sys().(syscall.WaitStatus)
glog.Warningf(` klog.Warningf(`
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
NGINX master process died (%v): %v NGINX master process died (%v): %v
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
@ -56,9 +56,9 @@ func WaitUntilPortIsAvailable(port int) {
} }
conn.Close() conn.Close()
// kill nginx worker processes // kill nginx worker processes
fs, err := proc.NewFS("/proc") fs, err := proc.NewFS("/proc", false)
if err != nil { if err != nil {
glog.Errorf("unexpected error reading /proc information: %v", err) klog.Errorf("unexpected error reading /proc information: %v", err)
continue continue
} }
@ -66,14 +66,14 @@ func WaitUntilPortIsAvailable(port int) {
for _, p := range procs { for _, p := range procs {
pn, err := p.Comm() pn, err := p.Comm()
if err != nil { if err != nil {
glog.Errorf("unexpected error obtaining process information: %v", err) klog.Errorf("unexpected error obtaining process information: %v", err)
continue continue
} }
if pn == "nginx" { if pn == "nginx" {
osp, err := os.FindProcess(p.PID) osp, err := os.FindProcess(p.PID)
if err != nil { if err != nil {
glog.Errorf("unexpected error obtaining process information: %v", err) klog.Errorf("unexpected error obtaining process information: %v", err)
continue continue
} }
osp.Signal(syscall.SIGQUIT) osp.Signal(syscall.SIGQUIT)

View file

@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/golang/glog"
"github.com/imdario/mergo" "github.com/imdario/mergo"
"k8s.io/klog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
@ -39,13 +39,13 @@ func (s k8sStore) syncSecret(key string) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
glog.V(3).Infof("Syncing Secret %q", key) klog.V(3).Infof("Syncing Secret %q", key)
// TODO: getPemCertificate should not write to disk to avoid unnecessary overhead // TODO: getPemCertificate should not write to disk to avoid unnecessary overhead
cert, err := s.getPemCertificate(key) cert, err := s.getPemCertificate(key)
if err != nil { if err != nil {
if !isErrSecretForAuth(err) { if !isErrSecretForAuth(err) {
glog.Warningf("Error obtaining X.509 certificate: %v", err) klog.Warningf("Error obtaining X.509 certificate: %v", err)
} }
return return
} }
@ -57,7 +57,7 @@ func (s k8sStore) syncSecret(key string) {
// no need to update // no need to update
return return
} }
glog.Infof("Updating Secret %q in the local store", key) klog.Infof("Updating Secret %q in the local store", key)
s.sslStore.Update(key, cert) s.sslStore.Update(key, cert)
// this update must trigger an update // this update must trigger an update
// (like an update event from a change in Ingress) // (like an update event from a change in Ingress)
@ -65,7 +65,7 @@ func (s k8sStore) syncSecret(key string) {
return return
} }
glog.Infof("Adding Secret %q to the local store", key) klog.Infof("Adding Secret %q to the local store", key)
s.sslStore.Add(key, cert) s.sslStore.Add(key, cert)
// this update must trigger an update // this update must trigger an update
// (like an update event from a change in Ingress) // (like an update event from a change in Ingress)
@ -116,7 +116,7 @@ func (s k8sStore) getPemCertificate(secretName string) (*ingress.SSLCert, error)
if ca != nil { if ca != nil {
msg += " and authentication" msg += " and authentication"
} }
glog.V(3).Info(msg) klog.V(3).Info(msg)
} else if ca != nil { } else if ca != nil {
sslCert, err = ssl.AddCertAuth(nsSecName, ca, s.filesystem) sslCert, err = ssl.AddCertAuth(nsSecName, ca, s.filesystem)
@ -127,7 +127,7 @@ func (s k8sStore) getPemCertificate(secretName string) (*ingress.SSLCert, error)
// makes this secret in 'syncSecret' to be used for Certificate Authentication // makes this secret in 'syncSecret' to be used for Certificate Authentication
// this does not enable Certificate Authentication // this does not enable Certificate Authentication
glog.V(3).Infof("Configuring Secret %q for TLS authentication", secretName) klog.V(3).Infof("Configuring Secret %q for TLS authentication", secretName)
} else { } else {
if auth != nil { if auth != nil {
@ -158,7 +158,7 @@ func (s k8sStore) checkSSLChainIssues() {
data, err := ssl.FullChainCert(secret.PemFileName, s.filesystem) data, err := ssl.FullChainCert(secret.PemFileName, s.filesystem)
if err != nil { if err != nil {
glog.Errorf("Error generating CA certificate chain for Secret %q: %v", secrKey, err) klog.Errorf("Error generating CA certificate chain for Secret %q: %v", secrKey, err)
continue continue
} }
@ -166,13 +166,13 @@ func (s k8sStore) checkSSLChainIssues() {
file, err := s.filesystem.Create(fullChainPemFileName) file, err := s.filesystem.Create(fullChainPemFileName)
if err != nil { if err != nil {
glog.Errorf("Error creating SSL certificate file for Secret %q: %v", secrKey, err) klog.Errorf("Error creating SSL certificate file for Secret %q: %v", secrKey, err)
continue continue
} }
_, err = file.Write(data) _, err = file.Write(data)
if err != nil { if err != nil {
glog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err) klog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err)
continue continue
} }
@ -180,13 +180,13 @@ func (s k8sStore) checkSSLChainIssues() {
err = mergo.MergeWithOverwrite(dst, secret) err = mergo.MergeWithOverwrite(dst, secret)
if err != nil { if err != nil {
glog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err) klog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err)
continue continue
} }
dst.FullChainPemFileName = fullChainPemFileName dst.FullChainPemFileName = fullChainPemFileName
glog.Infof("Updating local copy of SSL certificate %q with missing intermediate CA certs", secrKey) klog.Infof("Updating local copy of SSL certificate %q with missing intermediate CA certs", secrKey)
s.sslStore.Update(secrKey, dst) s.sslStore.Update(secrKey, dst)
// this update must trigger an update // this update must trigger an update
// (like an update event from a change in Ingress) // (like an update event from a change in Ingress)

View file

@ -25,7 +25,6 @@ import (
"time" "time"
"github.com/eapache/channels" "github.com/eapache/channels"
"github.com/golang/glog"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
@ -41,6 +40,7 @@ import (
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"k8s.io/klog"
"k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress" "k8s.io/ingress-nginx/internal/ingress"
@ -247,7 +247,7 @@ func New(checkOCSP bool,
} }
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{ eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{
Interface: client.CoreV1().Events(namespace), Interface: client.CoreV1().Events(namespace),
}) })
@ -305,7 +305,7 @@ func New(checkOCSP bool,
ing := obj.(*extensions.Ingress) ing := obj.(*extensions.Ingress)
if !class.IsValid(ing) { if !class.IsValid(ing) {
a, _ := parser.GetStringAnnotation(class.IngressKey, ing) a, _ := parser.GetStringAnnotation(class.IngressKey, ing)
glog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a) klog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a)
return return
} }
recorder.Eventf(ing, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name)) recorder.Eventf(ing, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name))
@ -325,17 +325,17 @@ func New(checkOCSP bool,
// If we reached here it means the ingress was deleted but its final state is unrecorded. // If we reached here it means the ingress was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown) tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok { if !ok {
glog.Errorf("couldn't get object from tombstone %#v", obj) klog.Errorf("couldn't get object from tombstone %#v", obj)
return return
} }
ing, ok = tombstone.Obj.(*extensions.Ingress) ing, ok = tombstone.Obj.(*extensions.Ingress)
if !ok { if !ok {
glog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj) klog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj)
return return
} }
} }
if !class.IsValid(ing) { if !class.IsValid(ing) {
glog.Infof("ignoring delete for ingress %v based on annotation %v", ing.Name, class.IngressKey) klog.Infof("ignoring delete for ingress %v based on annotation %v", ing.Name, class.IngressKey)
return return
} }
recorder.Eventf(ing, corev1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name)) recorder.Eventf(ing, corev1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name))
@ -356,10 +356,10 @@ func New(checkOCSP bool,
validOld := class.IsValid(oldIng) validOld := class.IsValid(oldIng)
validCur := class.IsValid(curIng) validCur := class.IsValid(curIng)
if !validOld && validCur { if !validOld && validCur {
glog.Infof("creating ingress %v based on annotation %v", curIng.Name, class.IngressKey) klog.Infof("creating ingress %v based on annotation %v", curIng.Name, class.IngressKey)
recorder.Eventf(curIng, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) recorder.Eventf(curIng, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
} else if validOld && !validCur { } else if validOld && !validCur {
glog.Infof("removing ingress %v based on annotation %v", curIng.Name, class.IngressKey) klog.Infof("removing ingress %v based on annotation %v", curIng.Name, class.IngressKey)
recorder.Eventf(curIng, corev1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) recorder.Eventf(curIng, corev1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
} else if validCur && !reflect.DeepEqual(old, cur) { } else if validCur && !reflect.DeepEqual(old, cur) {
recorder.Eventf(curIng, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) recorder.Eventf(curIng, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
@ -387,11 +387,11 @@ func New(checkOCSP bool,
// find references in ingresses and update local ssl certs // find references in ingresses and update local ssl certs
if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { if ings := store.secretIngressMap.Reference(key); len(ings) > 0 {
glog.Infof("secret %v was added and it is used in ingress annotations. Parsing...", key) klog.Infof("secret %v was added and it is used in ingress annotations. Parsing...", key)
for _, ingKey := range ings { for _, ingKey := range ings {
ing, err := store.getIngress(ingKey) ing, err := store.getIngress(ingKey)
if err != nil { if err != nil {
glog.Errorf("could not find Ingress %v in local store", ingKey) klog.Errorf("could not find Ingress %v in local store", ingKey)
continue continue
} }
store.syncIngress(ing) store.syncIngress(ing)
@ -414,11 +414,11 @@ func New(checkOCSP bool,
// find references in ingresses and update local ssl certs // find references in ingresses and update local ssl certs
if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { if ings := store.secretIngressMap.Reference(key); len(ings) > 0 {
glog.Infof("secret %v was updated and it is used in ingress annotations. Parsing...", key) klog.Infof("secret %v was updated and it is used in ingress annotations. Parsing...", key)
for _, ingKey := range ings { for _, ingKey := range ings {
ing, err := store.getIngress(ingKey) ing, err := store.getIngress(ingKey)
if err != nil { if err != nil {
glog.Errorf("could not find Ingress %v in local store", ingKey) klog.Errorf("could not find Ingress %v in local store", ingKey)
continue continue
} }
store.syncIngress(ing) store.syncIngress(ing)
@ -437,12 +437,12 @@ func New(checkOCSP bool,
// If we reached here it means the secret was deleted but its final state is unrecorded. // If we reached here it means the secret was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown) tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok { if !ok {
glog.Errorf("couldn't get object from tombstone %#v", obj) klog.Errorf("couldn't get object from tombstone %#v", obj)
return return
} }
sec, ok = tombstone.Obj.(*corev1.Secret) sec, ok = tombstone.Obj.(*corev1.Secret)
if !ok { if !ok {
glog.Errorf("Tombstone contained object that is not a Secret: %#v", obj) klog.Errorf("Tombstone contained object that is not a Secret: %#v", obj)
return return
} }
} }
@ -453,11 +453,11 @@ func New(checkOCSP bool,
// find references in ingresses // find references in ingresses
if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { if ings := store.secretIngressMap.Reference(key); len(ings) > 0 {
glog.Infof("secret %v was deleted and it is used in ingress annotations. Parsing...", key) klog.Infof("secret %v was deleted and it is used in ingress annotations. Parsing...", key)
for _, ingKey := range ings { for _, ingKey := range ings {
ing, err := store.getIngress(ingKey) ing, err := store.getIngress(ingKey)
if err != nil { if err != nil {
glog.Errorf("could not find Ingress %v in local store", ingKey) klog.Errorf("could not find Ingress %v in local store", ingKey)
continue continue
} }
store.syncIngress(ing) store.syncIngress(ing)
@ -527,7 +527,7 @@ func New(checkOCSP bool,
key := k8s.MetaNamespaceKey(ingKey) key := k8s.MetaNamespaceKey(ingKey)
ing, err := store.getIngress(key) ing, err := store.getIngress(key)
if err != nil { if err != nil {
glog.Errorf("could not find Ingress %v in local store: %v", key, err) klog.Errorf("could not find Ingress %v in local store: %v", key, err)
continue continue
} }
store.syncIngress(ing) store.syncIngress(ing)
@ -581,7 +581,7 @@ func New(checkOCSP bool,
ns, name, _ := k8s.ParseNameNS(configmap) ns, name, _ := k8s.ParseNameNS(configmap)
cm, err := client.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}) cm, err := client.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
glog.Warningf("Unexpected error reading configuration configmap: %v", err) klog.Warningf("Unexpected error reading configuration configmap: %v", err)
} }
store.setConfig(cm) store.setConfig(cm)
@ -592,7 +592,7 @@ func New(checkOCSP bool,
// annotation to a go struct // annotation to a go struct
func (s *k8sStore) syncIngress(ing *extensions.Ingress) { func (s *k8sStore) syncIngress(ing *extensions.Ingress) {
key := k8s.MetaNamespaceKey(ing) key := k8s.MetaNamespaceKey(ing)
glog.V(3).Infof("updating annotations information for ingress %v", key) klog.V(3).Infof("updating annotations information for ingress %v", key)
copyIng := &extensions.Ingress{} copyIng := &extensions.Ingress{}
ing.ObjectMeta.DeepCopyInto(&copyIng.ObjectMeta) ing.ObjectMeta.DeepCopyInto(&copyIng.ObjectMeta)
@ -615,7 +615,7 @@ func (s *k8sStore) syncIngress(ing *extensions.Ingress) {
ParsedAnnotations: s.annotations.Extract(ing), ParsedAnnotations: s.annotations.Extract(ing),
}) })
if err != nil { if err != nil {
glog.Error(err) klog.Error(err)
} }
} }
@ -623,7 +623,7 @@ func (s *k8sStore) syncIngress(ing *extensions.Ingress) {
// references in secretIngressMap. // references in secretIngressMap.
func (s *k8sStore) updateSecretIngressMap(ing *extensions.Ingress) { func (s *k8sStore) updateSecretIngressMap(ing *extensions.Ingress) {
key := k8s.MetaNamespaceKey(ing) key := k8s.MetaNamespaceKey(ing)
glog.V(3).Infof("updating references to secrets for ingress %v", key) klog.V(3).Infof("updating references to secrets for ingress %v", key)
// delete all existing references first // delete all existing references first
s.secretIngressMap.Delete(key) s.secretIngressMap.Delete(key)
@ -649,7 +649,7 @@ func (s *k8sStore) updateSecretIngressMap(ing *extensions.Ingress) {
for _, ann := range secretAnnotations { for _, ann := range secretAnnotations {
secrKey, err := objectRefAnnotationNsKey(ann, ing) secrKey, err := objectRefAnnotationNsKey(ann, ing)
if err != nil && !errors.IsMissingAnnotations(err) { if err != nil && !errors.IsMissingAnnotations(err) {
glog.Errorf("error reading secret reference in annotation %q: %s", ann, err) klog.Errorf("error reading secret reference in annotation %q: %s", ann, err)
continue continue
} }
if secrKey != "" { if secrKey != "" {
@ -775,18 +775,18 @@ func (s k8sStore) writeSSLSessionTicketKey(cmap *corev1.ConfigMap, fileName stri
// 81 used instead of 80 because of padding // 81 used instead of 80 because of padding
if !(ticketBytes == 48 || ticketBytes == 81) { if !(ticketBytes == 48 || ticketBytes == 81) {
glog.Warningf("ssl-session-ticket-key must contain either 48 or 80 bytes") klog.Warningf("ssl-session-ticket-key must contain either 48 or 80 bytes")
} }
decodedTicket, err := base64.StdEncoding.DecodeString(ticketString) decodedTicket, err := base64.StdEncoding.DecodeString(ticketString)
if err != nil { if err != nil {
glog.Errorf("unexpected error decoding ssl-session-ticket-key: %v", err) klog.Errorf("unexpected error decoding ssl-session-ticket-key: %v", err)
return return
} }
err = ioutil.WriteFile(fileName, decodedTicket, file.ReadWriteByUser) err = ioutil.WriteFile(fileName, decodedTicket, file.ReadWriteByUser)
if err != nil { if err != nil {
glog.Errorf("unexpected error writing ssl-session-ticket-key to %s: %v", fileName, err) klog.Errorf("unexpected error writing ssl-session-ticket-key to %s: %v", fileName, err)
return return
} }

View file

@ -21,7 +21,7 @@ import (
"io" "io"
"net" "net"
"github.com/golang/glog" "k8s.io/klog"
"github.com/paultag/sniff/parser" "github.com/paultag/sniff/parser"
) )
@ -63,19 +63,19 @@ func (p *TCPProxy) Handle(conn net.Conn) {
length, err := conn.Read(data) length, err := conn.Read(data)
if err != nil { if err != nil {
glog.V(4).Infof("Error reading the first 4k of the connection: %s", err) klog.V(4).Infof("Error reading the first 4k of the connection: %s", err)
return return
} }
proxy := p.Default proxy := p.Default
hostname, err := parser.GetHostname(data[:]) hostname, err := parser.GetHostname(data[:])
if err == nil { if err == nil {
glog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname) klog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname)
proxy = p.Get(hostname) proxy = p.Get(hostname)
} }
if proxy == nil { if proxy == nil {
glog.V(4).Infof("There is no configured proxy for SSL connections.") klog.V(4).Infof("There is no configured proxy for SSL connections.")
return return
} }
@ -96,16 +96,16 @@ func (p *TCPProxy) Handle(conn net.Conn) {
protocol = "TCP6" protocol = "TCP6"
} }
proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port) proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port)
glog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader) klog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader)
_, err = fmt.Fprintf(clientConn, proxyProtocolHeader) _, err = fmt.Fprintf(clientConn, proxyProtocolHeader)
} }
if err != nil { if err != nil {
glog.Errorf("Error writing Proxy Protocol header: %s", err) klog.Errorf("Error writing Proxy Protocol header: %s", err)
clientConn.Close() clientConn.Close()
} else { } else {
_, err = clientConn.Write(data[:length]) _, err = clientConn.Write(data[:length])
if err != nil { if err != nil {
glog.Errorf("Error writing the first 4k of proxy data: %s", err) klog.Errorf("Error writing the first 4k of proxy data: %s", err)
clientConn.Close() clientConn.Close()
} }
} }

View file

@ -23,7 +23,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/golang/glog" "k8s.io/klog"
"github.com/mitchellh/hashstructure" "github.com/mitchellh/hashstructure"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
@ -83,7 +83,7 @@ func ReadConfig(src map[string]string) config.Configuration {
for _, i := range strings.Split(val, ",") { for _, i := range strings.Split(val, ",") {
j, err := strconv.Atoi(i) j, err := strconv.Atoi(i)
if err != nil { if err != nil {
glog.Warningf("%v is not a valid http code: %v", i, err) klog.Warningf("%v is not a valid http code: %v", i, err)
} else { } else {
errors = append(errors, j) errors = append(errors, j)
} }
@ -118,7 +118,7 @@ func ReadConfig(src map[string]string) config.Configuration {
bindAddressIpv4List = append(bindAddressIpv4List, fmt.Sprintf("%v", ns)) bindAddressIpv4List = append(bindAddressIpv4List, fmt.Sprintf("%v", ns))
} }
} else { } else {
glog.Warningf("%v is not a valid textual representation of an IP address", i) klog.Warningf("%v is not a valid textual representation of an IP address", i)
} }
} }
} }
@ -140,12 +140,12 @@ func ReadConfig(src map[string]string) config.Configuration {
delete(conf, httpRedirectCode) delete(conf, httpRedirectCode)
j, err := strconv.Atoi(val) j, err := strconv.Atoi(val)
if err != nil { if err != nil {
glog.Warningf("%v is not a valid HTTP code: %v", val, err) klog.Warningf("%v is not a valid HTTP code: %v", val, err)
} else { } else {
if validRedirectCodes.Has(j) { if validRedirectCodes.Has(j) {
to.HTTPRedirectCode = j to.HTTPRedirectCode = j
} else { } else {
glog.Warningf("The code %v is not a valid as HTTP redirect code. Using the default.", val) klog.Warningf("The code %v is not a valid as HTTP redirect code. Using the default.", val)
} }
} }
} }
@ -155,7 +155,7 @@ func ReadConfig(src map[string]string) config.Configuration {
delete(conf, proxyHeaderTimeout) delete(conf, proxyHeaderTimeout)
duration, err := time.ParseDuration(val) duration, err := time.ParseDuration(val)
if err != nil { if err != nil {
glog.Warningf("proxy-protocol-header-timeout of %v encountered an error while being parsed %v. Switching to use default value instead.", val, err) klog.Warningf("proxy-protocol-header-timeout of %v encountered an error while being parsed %v. Switching to use default value instead.", val, err)
} else { } else {
to.ProxyProtocolHeaderTimeout = duration to.ProxyProtocolHeaderTimeout = duration
} }
@ -166,7 +166,7 @@ func ReadConfig(src map[string]string) config.Configuration {
delete(conf, proxyStreamResponses) delete(conf, proxyStreamResponses)
j, err := strconv.Atoi(val) j, err := strconv.Atoi(val)
if err != nil { if err != nil {
glog.Warningf("%v is not a valid number: %v", val, err) klog.Warningf("%v is not a valid number: %v", val, err)
} else { } else {
streamResponses = j streamResponses = j
} }
@ -220,18 +220,18 @@ func ReadConfig(src map[string]string) config.Configuration {
decoder, err := mapstructure.NewDecoder(config) decoder, err := mapstructure.NewDecoder(config)
if err != nil { if err != nil {
glog.Warningf("unexpected error merging defaults: %v", err) klog.Warningf("unexpected error merging defaults: %v", err)
} }
err = decoder.Decode(conf) err = decoder.Decode(conf)
if err != nil { if err != nil {
glog.Warningf("unexpected error merging defaults: %v", err) klog.Warningf("unexpected error merging defaults: %v", err)
} }
hash, err := hashstructure.Hash(to, &hashstructure.HashOptions{ hash, err := hashstructure.Hash(to, &hashstructure.HashOptions{
TagName: "json", TagName: "json",
}) })
if err != nil { if err != nil {
glog.Warningf("unexpected error obtaining hash: %v", err) klog.Warningf("unexpected error obtaining hash: %v", err)
} }
to.Checksum = fmt.Sprintf("%v", hash) to.Checksum = fmt.Sprintf("%v", hash)
@ -245,7 +245,7 @@ func filterErrors(codes []int) []int {
if code > 299 && code < 600 { if code > 299 && code < 600 {
fa = append(fa, code) fa = append(fa, code)
} else { } else {
glog.Warningf("error code %v is not valid for custom error pages", code) klog.Warningf("error code %v is not valid for custom error pages", code)
} }
} }

View file

@ -31,7 +31,6 @@ import (
text_template "text/template" text_template "text/template"
"time" "time"
"github.com/golang/glog"
"github.com/pkg/errors" "github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
@ -41,6 +40,7 @@ import (
"k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit"
"k8s.io/ingress-nginx/internal/ingress/controller/config" "k8s.io/ingress-nginx/internal/ingress/controller/config"
ing_net "k8s.io/ingress-nginx/internal/net" ing_net "k8s.io/ingress-nginx/internal/net"
"k8s.io/klog"
) )
const ( const (
@ -84,12 +84,12 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
outCmdBuf := t.bp.Get() outCmdBuf := t.bp.Get()
defer t.bp.Put(outCmdBuf) defer t.bp.Put(outCmdBuf)
if glog.V(3) { if klog.V(3) {
b, err := json.Marshal(conf) b, err := json.Marshal(conf)
if err != nil { if err != nil {
glog.Errorf("unexpected error: %v", err) klog.Errorf("unexpected error: %v", err)
} }
glog.Infof("NGINX configuration: %v", string(b)) klog.Infof("NGINX configuration: %v", string(b))
} }
err := t.tmpl.Execute(tmplBuf, conf) err := t.tmpl.Execute(tmplBuf, conf)
@ -103,7 +103,7 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
cmd.Stdin = tmplBuf cmd.Stdin = tmplBuf
cmd.Stdout = outCmdBuf cmd.Stdout = outCmdBuf
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
glog.Warningf("unexpected error cleaning template: %v", err) klog.Warningf("unexpected error cleaning template: %v", err)
return tmplBuf.Bytes(), nil return tmplBuf.Bytes(), nil
} }
@ -202,7 +202,7 @@ func shouldConfigureLuaRestyWAF(disableLuaRestyWAF bool, mode string) bool {
func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string { func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string {
servers, ok := s.([]*ingress.Server) servers, ok := s.([]*ingress.Server)
if !ok { if !ok {
glog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s) klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s)
return "" return ""
} }
@ -238,12 +238,12 @@ func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string {
func buildResolversForLua(res interface{}, disableIpv6 interface{}) string { func buildResolversForLua(res interface{}, disableIpv6 interface{}) string {
nss, ok := res.([]net.IP) nss, ok := res.([]net.IP)
if !ok { if !ok {
glog.Errorf("expected a '[]net.IP' type but %T was returned", res) klog.Errorf("expected a '[]net.IP' type but %T was returned", res)
return "" return ""
} }
no6, ok := disableIpv6.(bool) no6, ok := disableIpv6.(bool)
if !ok { if !ok {
glog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6)
return "" return ""
} }
@ -267,12 +267,12 @@ func buildResolvers(res interface{}, disableIpv6 interface{}) string {
// NGINX need IPV6 addresses to be surrounded by brackets // NGINX need IPV6 addresses to be surrounded by brackets
nss, ok := res.([]net.IP) nss, ok := res.([]net.IP)
if !ok { if !ok {
glog.Errorf("expected a '[]net.IP' type but %T was returned", res) klog.Errorf("expected a '[]net.IP' type but %T was returned", res)
return "" return ""
} }
no6, ok := disableIpv6.(bool) no6, ok := disableIpv6.(bool)
if !ok { if !ok {
glog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6)
return "" return ""
} }
@ -316,7 +316,7 @@ func stripLocationModifer(path string) string {
func enforceRegexModifier(input interface{}) bool { func enforceRegexModifier(input interface{}) bool {
locations, ok := input.([]*ingress.Location) locations, ok := input.([]*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input) klog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input)
return false return false
} }
@ -333,7 +333,7 @@ func enforceRegexModifier(input interface{}) bool {
func buildLocation(input interface{}, enforceRegex bool) string { func buildLocation(input interface{}, enforceRegex bool) string {
location, ok := input.(*ingress.Location) location, ok := input.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return slash return slash
} }
@ -360,7 +360,7 @@ func buildLocation(input interface{}, enforceRegex bool) string {
func buildAuthLocation(input interface{}) string { func buildAuthLocation(input interface{}) string {
location, ok := input.(*ingress.Location) location, ok := input.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return "" return ""
} }
@ -378,7 +378,7 @@ func buildAuthResponseHeaders(input interface{}) []string {
location, ok := input.(*ingress.Location) location, ok := input.(*ingress.Location)
res := []string{} res := []string{}
if !ok { if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return res return res
} }
@ -398,7 +398,7 @@ func buildAuthResponseHeaders(input interface{}) []string {
func buildLogFormatUpstream(input interface{}) string { func buildLogFormatUpstream(input interface{}) string {
cfg, ok := input.(config.Configuration) cfg, ok := input.(config.Configuration)
if !ok { if !ok {
glog.Errorf("expected a 'config.Configuration' type but %T was returned", input) klog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
return "" return ""
} }
@ -412,13 +412,13 @@ func buildLogFormatUpstream(input interface{}) string {
func buildProxyPass(host string, b interface{}, loc interface{}) string { func buildProxyPass(host string, b interface{}, loc interface{}) string {
backends, ok := b.([]*ingress.Backend) backends, ok := b.([]*ingress.Backend)
if !ok { if !ok {
glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b) klog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
return "" return ""
} }
location, ok := loc.(*ingress.Location) location, ok := loc.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "" return ""
} }
@ -520,7 +520,7 @@ func filterRateLimits(input interface{}) []ratelimit.Config {
servers, ok := input.([]*ingress.Server) servers, ok := input.([]*ingress.Server)
if !ok { if !ok {
glog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input) klog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input)
return ratelimits return ratelimits
} }
for _, server := range servers { for _, server := range servers {
@ -544,7 +544,7 @@ func buildRateLimitZones(input interface{}) []string {
servers, ok := input.([]*ingress.Server) servers, ok := input.([]*ingress.Server)
if !ok { if !ok {
glog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input) klog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input)
return zones.List() return zones.List()
} }
@ -594,7 +594,7 @@ func buildRateLimit(input interface{}) []string {
loc, ok := input.(*ingress.Location) loc, ok := input.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return limits return limits
} }
@ -634,7 +634,7 @@ func buildRateLimit(input interface{}) []string {
func isLocationInLocationList(location interface{}, rawLocationList string) bool { func isLocationInLocationList(location interface{}, rawLocationList string) bool {
loc, ok := location.(*ingress.Location) loc, ok := location.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", location) klog.Errorf("expected an '*ingress.Location' type but %T was returned", location)
return false return false
} }
@ -656,7 +656,7 @@ func isLocationInLocationList(location interface{}, rawLocationList string) bool
func isLocationAllowed(input interface{}) bool { func isLocationAllowed(input interface{}) bool {
loc, ok := input.(*ingress.Location) loc, ok := input.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) klog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return false return false
} }
@ -675,7 +675,7 @@ var (
func buildDenyVariable(a interface{}) string { func buildDenyVariable(a interface{}) string {
l, ok := a.(string) l, ok := a.(string)
if !ok { if !ok {
glog.Errorf("expected a 'string' type but %T was returned", a) klog.Errorf("expected a 'string' type but %T was returned", a)
return "" return ""
} }
@ -689,7 +689,7 @@ func buildDenyVariable(a interface{}) string {
func buildUpstreamName(loc interface{}) string { func buildUpstreamName(loc interface{}) string {
location, ok := loc.(*ingress.Location) location, ok := loc.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "" return ""
} }
@ -701,7 +701,7 @@ func buildUpstreamName(loc interface{}) string {
func buildNextUpstream(i, r interface{}) string { func buildNextUpstream(i, r interface{}) string {
nextUpstream, ok := i.(string) nextUpstream, ok := i.(string)
if !ok { if !ok {
glog.Errorf("expected a 'string' type but %T was returned", i) klog.Errorf("expected a 'string' type but %T was returned", i)
return "" return ""
} }
@ -738,13 +738,13 @@ var nginxOffsetRegex = regexp.MustCompile("^[0-9]+[kKmMgG]{0,1}$")
func isValidByteSize(input interface{}, isOffset bool) bool { func isValidByteSize(input interface{}, isOffset bool) bool {
s, ok := input.(string) s, ok := input.(string)
if !ok { if !ok {
glog.Errorf("expected an 'string' type but %T was returned", input) klog.Errorf("expected an 'string' type but %T was returned", input)
return false return false
} }
s = strings.TrimSpace(s) s = strings.TrimSpace(s)
if s == "" { if s == "" {
glog.V(2).Info("empty byte size, hence it will not be set") klog.V(2).Info("empty byte size, hence it will not be set")
return false return false
} }
@ -765,13 +765,13 @@ type ingressInformation struct {
func getIngressInformation(i, p interface{}) *ingressInformation { func getIngressInformation(i, p interface{}) *ingressInformation {
ing, ok := i.(*ingress.Ingress) ing, ok := i.(*ingress.Ingress)
if !ok { if !ok {
glog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i) klog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i)
return &ingressInformation{} return &ingressInformation{}
} }
path, ok := p.(string) path, ok := p.(string)
if !ok { if !ok {
glog.Errorf("expected a 'string' type but %T was returned", p) klog.Errorf("expected a 'string' type but %T was returned", p)
return &ingressInformation{} return &ingressInformation{}
} }
@ -808,7 +808,7 @@ func getIngressInformation(i, p interface{}) *ingressInformation {
func buildForwardedFor(input interface{}) string { func buildForwardedFor(input interface{}) string {
s, ok := input.(string) s, ok := input.(string)
if !ok { if !ok {
glog.Errorf("expected a 'string' type but %T was returned", input) klog.Errorf("expected a 'string' type but %T was returned", input)
return "" return ""
} }
@ -820,7 +820,7 @@ func buildForwardedFor(input interface{}) string {
func buildAuthSignURL(input interface{}) string { func buildAuthSignURL(input interface{}) string {
s, ok := input.(string) s, ok := input.(string)
if !ok { if !ok {
glog.Errorf("expected an 'string' type but %T was returned", input) klog.Errorf("expected an 'string' type but %T was returned", input)
return "" return ""
} }
@ -855,7 +855,7 @@ func randomString() string {
func buildOpentracing(input interface{}) string { func buildOpentracing(input interface{}) string {
cfg, ok := input.(config.Configuration) cfg, ok := input.(config.Configuration)
if !ok { if !ok {
glog.Errorf("expected a 'config.Configuration' type but %T was returned", input) klog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
return "" return ""
} }
@ -881,7 +881,7 @@ func buildOpentracing(input interface{}) string {
func buildInfluxDB(input interface{}) string { func buildInfluxDB(input interface{}) string {
cfg, ok := input.(influxdb.Config) cfg, ok := input.(influxdb.Config)
if !ok { if !ok {
glog.Errorf("expected an 'influxdb.Config' type but %T was returned", input) klog.Errorf("expected an 'influxdb.Config' type but %T was returned", input)
return "" return ""
} }
@ -901,7 +901,7 @@ func buildInfluxDB(input interface{}) string {
func proxySetHeader(loc interface{}) string { func proxySetHeader(loc interface{}) string {
location, ok := loc.(*ingress.Location) location, ok := loc.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "proxy_set_header" return "proxy_set_header"
} }
@ -932,7 +932,7 @@ func buildCustomErrorDeps(proxySetHeaders map[string]string, errorCodes []int, e
func collectCustomErrorsPerServer(input interface{}) []int { func collectCustomErrorsPerServer(input interface{}) []int {
server, ok := input.(*ingress.Server) server, ok := input.(*ingress.Server)
if !ok { if !ok {
glog.Errorf("expected a '*ingress.Server' type but %T was returned", input) klog.Errorf("expected a '*ingress.Server' type but %T was returned", input)
return nil return nil
} }
@ -954,7 +954,7 @@ func collectCustomErrorsPerServer(input interface{}) []int {
func opentracingPropagateContext(loc interface{}) string { func opentracingPropagateContext(loc interface{}) string {
location, ok := loc.(*ingress.Location) location, ok := loc.(*ingress.Location)
if !ok { if !ok {
glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return "opentracing_propagate_context" return "opentracing_propagate_context"
} }

View file

@ -24,7 +24,7 @@ import (
"fmt" "fmt"
"github.com/golang/glog" "k8s.io/klog"
api "k8s.io/api/core/v1" api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/util/sysctl" "k8s.io/kubernetes/pkg/util/sysctl"
@ -57,7 +57,7 @@ func upstreamName(namespace string, service string, port intstr.IntOrString) str
func sysctlSomaxconn() int { func sysctlSomaxconn() int {
maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn") maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn")
if err != nil || maxConns < 512 { if err != nil || maxConns < 512 {
glog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns) klog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns)
return 511 return 511
} }
@ -70,10 +70,10 @@ func sysctlFSFileMax() int {
var rLimit syscall.Rlimit var rLimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
if err != nil { if err != nil {
glog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err) klog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err)
return 0 return 0
} }
glog.V(2).Infof("rlimit.max=%v", rLimit.Max) klog.V(2).Infof("rlimit.max=%v", rLimit.Max)
return int(rLimit.Max) return int(rLimit.Max)
} }

View file

@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/ingress-nginx/internal/ingress" "k8s.io/ingress-nginx/internal/ingress"
@ -181,11 +181,11 @@ func (cm *Controller) SetSSLExpireTime(servers []*ingress.Server) {
func (cm *Controller) RemoveMetrics(hosts []string, registry prometheus.Gatherer) { func (cm *Controller) RemoveMetrics(hosts []string, registry prometheus.Gatherer) {
mfs, err := registry.Gather() mfs, err := registry.Gather()
if err != nil { if err != nil {
glog.Errorf("Error gathering metrics: %v", err) klog.Errorf("Error gathering metrics: %v", err)
return return
} }
glog.V(2).Infof("removing SSL certificate metrics for %v hosts", hosts) klog.V(2).Infof("removing SSL certificate metrics for %v hosts", hosts)
toRemove := sets.NewString(hosts...) toRemove := sets.NewString(hosts...)
for _, mf := range mfs { for _, mf := range mfs {
@ -212,10 +212,10 @@ func (cm *Controller) RemoveMetrics(hosts []string, registry prometheus.Gatherer
continue continue
} }
glog.V(2).Infof("Removing prometheus metric from gauge %v for host %v", metricName, host) klog.V(2).Infof("Removing prometheus metric from gauge %v for host %v", metricName, host)
removed := cm.sslExpireTime.Delete(labels) removed := cm.sslExpireTime.Delete(labels)
if !removed { if !removed {
glog.V(2).Infof("metric %v for host %v with labels not removed: %v", metricName, host, labels) klog.V(2).Infof("metric %v for host %v with labels not removed: %v", metricName, host, labels)
} }
} }
} }

View file

@ -23,8 +23,8 @@ import (
"regexp" "regexp"
"strconv" "strconv"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"k8s.io/klog"
) )
var ( var (
@ -189,7 +189,7 @@ func parse(data string) *basicStatus {
func getNginxStatus(port int, path string) (*basicStatus, error) { func getNginxStatus(port int, path string) (*basicStatus, error) {
url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path)
glog.V(3).Infof("start scraping url: %v", url) klog.V(3).Infof("start scraping url: %v", url)
data, err := httpBody(url) data, err := httpBody(url)
@ -204,7 +204,7 @@ func getNginxStatus(port int, path string) (*basicStatus, error) {
func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) { func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) {
s, err := getNginxStatus(p.ngxHealthPort, p.ngxStatusPath) s, err := getNginxStatus(p.ngxHealthPort, p.ngxStatusPath)
if err != nil { if err != nil {
glog.Warningf("unexpected error obtaining nginx status info: %v", err) klog.Warningf("unexpected error obtaining nginx status info: %v", err)
return return
} }

View file

@ -17,9 +17,10 @@ limitations under the License.
package collectors package collectors
import ( import (
"fmt"
"path/filepath" "path/filepath"
"github.com/golang/glog" "k8s.io/klog"
common "github.com/ncabatoff/process-exporter" common "github.com/ncabatoff/process-exporter"
"github.com/ncabatoff/process-exporter/proc" "github.com/ncabatoff/process-exporter/proc"
@ -37,7 +38,7 @@ type Stopable interface {
Stop() Stop()
} }
// BinaryNameMatcher ... // BinaryNameMatcher define a namer using the binary name
type BinaryNameMatcher struct { type BinaryNameMatcher struct {
Name string Name string
Binary string Binary string
@ -45,7 +46,7 @@ type BinaryNameMatcher struct {
// MatchAndName returns false if the match failed, otherwise // MatchAndName returns false if the match failed, otherwise
// true and the resulting name. // true and the resulting name.
func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) { func (em BinaryNameMatcher) MatchAndName(nacl common.ProcAttributes) (bool, string) {
if len(nacl.Cmdline) == 0 { if len(nacl.Cmdline) == 0 {
return false, "" return false, ""
} }
@ -53,6 +54,11 @@ func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, stri
return em.Name == cmd, "" return em.Name == cmd, ""
} }
// String returns the name of the binary to match
func (em BinaryNameMatcher) String() string {
return fmt.Sprintf("%+v", em.Binary)
}
type namedProcessData struct { type namedProcessData struct {
numProcs *prometheus.Desc numProcs *prometheus.Desc
cpuSecs *prometheus.Desc cpuSecs *prometheus.Desc
@ -86,7 +92,7 @@ var binary = "/usr/bin/nginx"
// NewNGINXProcess returns a new prometheus collector for the nginx process // NewNGINXProcess returns a new prometheus collector for the nginx process
func NewNGINXProcess(pod, namespace, ingressClass string) (NGINXProcessCollector, error) { func NewNGINXProcess(pod, namespace, ingressClass string) (NGINXProcessCollector, error) {
fs, err := proc.NewFS("/proc") fs, err := proc.NewFS("/proc", false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -98,11 +104,11 @@ func NewNGINXProcess(pod, namespace, ingressClass string) (NGINXProcessCollector
p := namedProcess{ p := namedProcess{
scrapeChan: make(chan scrapeRequest), scrapeChan: make(chan scrapeRequest),
Grouper: proc.NewGrouper(true, nm), Grouper: proc.NewGrouper(nm, true, false, false),
fs: fs, fs: fs,
} }
_, err = p.Update(p.fs.AllProcs()) _, _, err = p.Update(p.fs.AllProcs())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -184,23 +190,23 @@ func (p namedProcess) Stop() {
} }
func (p namedProcess) scrape(ch chan<- prometheus.Metric) { func (p namedProcess) scrape(ch chan<- prometheus.Metric) {
_, err := p.Update(p.fs.AllProcs()) _, groups, err := p.Update(p.fs.AllProcs())
if err != nil { if err != nil {
glog.Warningf("unexpected error obtaining nginx process info: %v", err) klog.Warningf("unexpected error obtaining nginx process info: %v", err)
return return
} }
for _, gcounts := range p.Groups() { for _, gcounts := range groups {
ch <- prometheus.MustNewConstMetric(p.data.numProcs, ch <- prometheus.MustNewConstMetric(p.data.numProcs,
prometheus.GaugeValue, float64(gcounts.Procs)) prometheus.GaugeValue, float64(gcounts.Procs))
ch <- prometheus.MustNewConstMetric(p.data.memResidentbytes, ch <- prometheus.MustNewConstMetric(p.data.memResidentbytes,
prometheus.GaugeValue, float64(gcounts.Memresident)) prometheus.GaugeValue, float64(gcounts.Memory.ResidentBytes))
ch <- prometheus.MustNewConstMetric(p.data.memVirtualbytes, ch <- prometheus.MustNewConstMetric(p.data.memVirtualbytes,
prometheus.GaugeValue, float64(gcounts.Memvirtual)) prometheus.GaugeValue, float64(gcounts.Memory.VirtualBytes))
ch <- prometheus.MustNewConstMetric(p.data.startTime, ch <- prometheus.MustNewConstMetric(p.data.startTime,
prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix())) prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()))
ch <- prometheus.MustNewConstMetric(p.data.cpuSecs, ch <- prometheus.MustNewConstMetric(p.data.cpuSecs,
prometheus.CounterValue, gcounts.Cpu) prometheus.CounterValue, gcounts.CPUSystemTime)
ch <- prometheus.MustNewConstMetric(p.data.readBytes, ch <- prometheus.MustNewConstMetric(p.data.readBytes,
prometheus.CounterValue, float64(gcounts.ReadBytes)) prometheus.CounterValue, float64(gcounts.ReadBytes))
ch <- prometheus.MustNewConstMetric(p.data.writeBytes, ch <- prometheus.MustNewConstMetric(p.data.writeBytes,

View file

@ -23,10 +23,10 @@ import (
"net" "net"
"os" "os"
"github.com/golang/glog"
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
) )
type upstream struct { type upstream struct {
@ -203,19 +203,19 @@ func NewSocketCollector(pod, namespace, class string) (*SocketCollector, error)
} }
func (sc *SocketCollector) handleMessage(msg []byte) { func (sc *SocketCollector) handleMessage(msg []byte) {
glog.V(5).Infof("msg: %v", string(msg)) klog.V(5).Infof("msg: %v", string(msg))
// Unmarshal bytes // Unmarshal bytes
var statsBatch []socketData var statsBatch []socketData
err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(msg, &statsBatch) err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(msg, &statsBatch)
if err != nil { if err != nil {
glog.Errorf("Unexpected error deserializing JSON payload: %v. Payload:\n%v", err, string(msg)) klog.Errorf("Unexpected error deserializing JSON payload: %v. Payload:\n%v", err, string(msg))
return return
} }
for _, stats := range statsBatch { for _, stats := range statsBatch {
if !sc.hosts.Has(stats.Host) { if !sc.hosts.Has(stats.Host) {
glog.V(3).Infof("skiping metric for host %v that is not being served", stats.Host) klog.V(3).Infof("skiping metric for host %v that is not being served", stats.Host)
continue continue
} }
@ -243,7 +243,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
requestsMetric, err := sc.requests.GetMetricWith(collectorLabels) requestsMetric, err := sc.requests.GetMetricWith(collectorLabels)
if err != nil { if err != nil {
glog.Errorf("Error fetching requests metric: %v", err) klog.Errorf("Error fetching requests metric: %v", err)
} else { } else {
requestsMetric.Inc() requestsMetric.Inc()
} }
@ -251,7 +251,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
if stats.Latency != -1 { if stats.Latency != -1 {
latencyMetric, err := sc.upstreamLatency.GetMetricWith(latencyLabels) latencyMetric, err := sc.upstreamLatency.GetMetricWith(latencyLabels)
if err != nil { if err != nil {
glog.Errorf("Error fetching latency metric: %v", err) klog.Errorf("Error fetching latency metric: %v", err)
} else { } else {
latencyMetric.Observe(stats.Latency) latencyMetric.Observe(stats.Latency)
} }
@ -260,7 +260,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
if stats.RequestTime != -1 { if stats.RequestTime != -1 {
requestTimeMetric, err := sc.requestTime.GetMetricWith(requestLabels) requestTimeMetric, err := sc.requestTime.GetMetricWith(requestLabels)
if err != nil { if err != nil {
glog.Errorf("Error fetching request duration metric: %v", err) klog.Errorf("Error fetching request duration metric: %v", err)
} else { } else {
requestTimeMetric.Observe(stats.RequestTime) requestTimeMetric.Observe(stats.RequestTime)
} }
@ -269,7 +269,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
if stats.RequestLength != -1 { if stats.RequestLength != -1 {
requestLengthMetric, err := sc.requestLength.GetMetricWith(requestLabels) requestLengthMetric, err := sc.requestLength.GetMetricWith(requestLabels)
if err != nil { if err != nil {
glog.Errorf("Error fetching request length metric: %v", err) klog.Errorf("Error fetching request length metric: %v", err)
} else { } else {
requestLengthMetric.Observe(stats.RequestLength) requestLengthMetric.Observe(stats.RequestLength)
} }
@ -278,7 +278,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
if stats.ResponseTime != -1 { if stats.ResponseTime != -1 {
responseTimeMetric, err := sc.responseTime.GetMetricWith(requestLabels) responseTimeMetric, err := sc.responseTime.GetMetricWith(requestLabels)
if err != nil { if err != nil {
glog.Errorf("Error fetching upstream response time metric: %v", err) klog.Errorf("Error fetching upstream response time metric: %v", err)
} else { } else {
responseTimeMetric.Observe(stats.ResponseTime) responseTimeMetric.Observe(stats.ResponseTime)
} }
@ -287,14 +287,14 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
if stats.ResponseLength != -1 { if stats.ResponseLength != -1 {
bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels) bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels)
if err != nil { if err != nil {
glog.Errorf("Error fetching bytes sent metric: %v", err) klog.Errorf("Error fetching bytes sent metric: %v", err)
} else { } else {
bytesSentMetric.Observe(stats.ResponseLength) bytesSentMetric.Observe(stats.ResponseLength)
} }
responseSizeMetric, err := sc.responseLength.GetMetricWith(requestLabels) responseSizeMetric, err := sc.responseLength.GetMetricWith(requestLabels)
if err != nil { if err != nil {
glog.Errorf("Error fetching bytes sent metric: %v", err) klog.Errorf("Error fetching bytes sent metric: %v", err)
} else { } else {
responseSizeMetric.Observe(stats.ResponseLength) responseSizeMetric.Observe(stats.ResponseLength)
} }
@ -325,12 +325,12 @@ func (sc *SocketCollector) Stop() {
func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus.Gatherer) { func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus.Gatherer) {
mfs, err := registry.Gather() mfs, err := registry.Gather()
if err != nil { if err != nil {
glog.Errorf("Error gathering metrics: %v", err) klog.Errorf("Error gathering metrics: %v", err)
return return
} }
// 1. remove metrics of removed ingresses // 1. remove metrics of removed ingresses
glog.V(2).Infof("removing ingresses %v from metrics", ingresses) klog.V(2).Infof("removing ingresses %v from metrics", ingresses)
for _, mf := range mfs { for _, mf := range mfs {
metricName := mf.GetName() metricName := mf.GetName()
metric, ok := sc.metricMapping[metricName] metric, ok := sc.metricMapping[metricName]
@ -362,13 +362,13 @@ func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus
continue continue
} }
glog.V(2).Infof("Removing prometheus metric from histogram %v for ingress %v", metricName, ingKey) klog.V(2).Infof("Removing prometheus metric from histogram %v for ingress %v", metricName, ingKey)
h, ok := metric.(*prometheus.HistogramVec) h, ok := metric.(*prometheus.HistogramVec)
if ok { if ok {
removed := h.Delete(labels) removed := h.Delete(labels)
if !removed { if !removed {
glog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) klog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels)
} }
} }
@ -376,7 +376,7 @@ func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus
if ok { if ok {
removed := s.Delete(labels) removed := s.Delete(labels)
if !removed { if !removed {
glog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) klog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels)
} }
} }
} }

View file

@ -25,8 +25,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/golang/glog"
"github.com/pkg/errors" "github.com/pkg/errors"
"k8s.io/klog"
pool "gopkg.in/go-playground/pool.v3" pool "gopkg.in/go-playground/pool.v3"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
@ -123,7 +123,7 @@ func (s statusSync) Run() {
var stopCh chan struct{} var stopCh chan struct{}
callbacks := leaderelection.LeaderCallbacks{ callbacks := leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) { OnStartedLeading: func(ctx context.Context) {
glog.V(2).Infof("I am the new status update leader") klog.V(2).Infof("I am the new status update leader")
stopCh = make(chan struct{}) stopCh = make(chan struct{})
go s.syncQueue.Run(time.Second, stopCh) go s.syncQueue.Run(time.Second, stopCh)
// trigger initial sync // trigger initial sync
@ -136,7 +136,7 @@ func (s statusSync) Run() {
}, stopCh) }, stopCh)
}, },
OnStoppedLeading: func() { OnStoppedLeading: func() {
glog.V(2).Info("I am not status update leader anymore") klog.V(2).Info("I am not status update leader anymore")
close(stopCh) close(stopCh)
// cancel the context // cancel the context
@ -145,7 +145,7 @@ func (s statusSync) Run() {
cancelContext = newLeaderCtx(ctx) cancelContext = newLeaderCtx(ctx)
}, },
OnNewLeader: func(identity string) { OnNewLeader: func(identity string) {
glog.Infof("new leader elected: %v", identity) klog.Infof("new leader elected: %v", identity)
}, },
} }
@ -176,7 +176,7 @@ func (s statusSync) Run() {
Callbacks: callbacks, Callbacks: callbacks,
}) })
if err != nil { if err != nil {
glog.Fatalf("unexpected error starting leader election: %v", err) klog.Fatalf("unexpected error starting leader election: %v", err)
} }
cancelContext = newLeaderCtx(ctx) cancelContext = newLeaderCtx(ctx)
@ -193,36 +193,36 @@ func (s statusSync) Shutdown() {
} }
if !s.UpdateStatusOnShutdown { if !s.UpdateStatusOnShutdown {
glog.Warningf("skipping update of status of Ingress rules") klog.Warningf("skipping update of status of Ingress rules")
return return
} }
glog.Info("updating status of Ingress rules (remove)") klog.Info("updating status of Ingress rules (remove)")
addrs, err := s.runningAddresses() addrs, err := s.runningAddresses()
if err != nil { if err != nil {
glog.Errorf("error obtaining running IPs: %v", addrs) klog.Errorf("error obtaining running IPs: %v", addrs)
return return
} }
if len(addrs) > 1 { if len(addrs) > 1 {
// leave the job to the next leader // leave the job to the next leader
glog.Infof("leaving status update for next leader (%v)", len(addrs)) klog.Infof("leaving status update for next leader (%v)", len(addrs))
return return
} }
if s.isRunningMultiplePods() { if s.isRunningMultiplePods() {
glog.V(2).Infof("skipping Ingress status update (multiple pods running - another one will be elected as master)") klog.V(2).Infof("skipping Ingress status update (multiple pods running - another one will be elected as master)")
return return
} }
glog.Infof("removing address from ingress status (%v)", addrs) klog.Infof("removing address from ingress status (%v)", addrs)
s.updateStatus([]apiv1.LoadBalancerIngress{}) s.updateStatus([]apiv1.LoadBalancerIngress{})
} }
func (s *statusSync) sync(key interface{}) error { func (s *statusSync) sync(key interface{}) error {
if s.syncQueue.IsShuttingDown() { if s.syncQueue.IsShuttingDown() {
glog.V(2).Infof("skipping Ingress status update (shutting down in progress)") klog.V(2).Infof("skipping Ingress status update (shutting down in progress)")
return nil return nil
} }
@ -247,7 +247,7 @@ func (s statusSync) keyfunc(input interface{}) (interface{}, error) {
func NewStatusSyncer(config Config) Sync { func NewStatusSyncer(config Config) Sync {
pod, err := k8s.GetPodDetails(config.Client) pod, err := k8s.GetPodDetails(config.Client)
if err != nil { if err != nil {
glog.Fatalf("unexpected error obtaining pod information: %v", err) klog.Fatalf("unexpected error obtaining pod information: %v", err)
} }
st := statusSync{ st := statusSync{
@ -360,7 +360,7 @@ func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) {
curIPs := ing.Status.LoadBalancer.Ingress curIPs := ing.Status.LoadBalancer.Ingress
sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs)) sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs))
if ingressSliceEqual(curIPs, newIngressPoint) { if ingressSliceEqual(curIPs, newIngressPoint) {
glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name) klog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name)
continue continue
} }
@ -385,11 +385,11 @@ func runUpdate(ing *ingress.Ingress, status []apiv1.LoadBalancerIngress,
return nil, errors.Wrap(err, fmt.Sprintf("unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name)) return nil, errors.Wrap(err, fmt.Sprintf("unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name))
} }
glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, status) klog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, status)
currIng.Status.LoadBalancer.Ingress = status currIng.Status.LoadBalancer.Ingress = status
_, err = ingClient.UpdateStatus(currIng) _, err = ingClient.UpdateStatus(currIng)
if err != nil { if err != nil {
glog.Warningf("error updating ingress rule: %v", err) klog.Warningf("error updating ingress rule: %v", err)
} }
return true, nil return true, nil

View file

@ -21,11 +21,11 @@ import (
"os" "os"
"strings" "strings"
"github.com/golang/glog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/klog"
) )
// ParseNameNS parses a string searching a namespace and name // ParseNameNS parses a string searching a namespace and name
@ -42,7 +42,7 @@ func ParseNameNS(input string) (string, string, error) {
func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP bool) string { func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP bool) string {
node, err := kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) node, err := kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
glog.Errorf("Error getting node %v: %v", name, err) klog.Errorf("Error getting node %v: %v", name, err)
return "" return ""
} }
@ -104,7 +104,7 @@ func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) {
func MetaNamespaceKey(obj interface{}) string { func MetaNamespaceKey(obj interface{}) string {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil { if err != nil {
glog.Warning(err) klog.Warning(err)
} }
return key return key

View file

@ -21,7 +21,7 @@ import (
"net" "net"
"strings" "strings"
"github.com/golang/glog" "k8s.io/klog"
) )
var defResolvConf = "/etc/resolv.conf" var defResolvConf = "/etc/resolv.conf"
@ -53,6 +53,6 @@ func GetSystemNameServers() ([]net.IP, error) {
} }
} }
glog.V(3).Infof("nameservers IP address/es to use: %v", nameservers) klog.V(3).Infof("nameservers IP address/es to use: %v", nameservers)
return nameservers, nil return nameservers, nil
} }

View file

@ -32,8 +32,8 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/golang/glog"
"github.com/zakjan/cert-chain-resolver/certUtil" "github.com/zakjan/cert-chain-resolver/certUtil"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
@ -56,7 +56,7 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte,
if err != nil { if err != nil {
return nil, fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) return nil, fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err)
} }
glog.V(3).Infof("Creating temp file %v for Keypair: %v", tempPemFile.Name(), pemName) klog.V(3).Infof("Creating temp file %v for Keypair: %v", tempPemFile.Name(), pemName)
_, err = tempPemFile.Write(cert) _, err = tempPemFile.Write(cert)
if err != nil { if err != nil {
@ -110,11 +110,11 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte,
} }
if len(pemCert.Extensions) > 0 { if len(pemCert.Extensions) > 0 {
glog.V(3).Info("parsing ssl certificate extensions") klog.V(3).Info("parsing ssl certificate extensions")
for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) { for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) {
dns, _, _, err := parseSANExtension(ext.Value) dns, _, _, err := parseSANExtension(ext.Value)
if err != nil { if err != nil {
glog.Warningf("unexpected error parsing certificate extensions: %v", err) klog.Warningf("unexpected error parsing certificate extensions: %v", err)
continue continue
} }
@ -224,11 +224,11 @@ func CreateSSLCert(name string, cert, key, ca []byte) (*ingress.SSLCert, error)
} }
if len(pemCert.Extensions) > 0 { if len(pemCert.Extensions) > 0 {
glog.V(3).Info("parsing ssl certificate extensions") klog.V(3).Info("parsing ssl certificate extensions")
for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) { for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) {
dns, _, _, err := parseSANExtension(ext.Value) dns, _, _, err := parseSANExtension(ext.Value)
if err != nil { if err != nil {
glog.Warningf("unexpected error parsing certificate extensions: %v", err) klog.Warningf("unexpected error parsing certificate extensions: %v", err)
continue continue
} }
@ -366,7 +366,7 @@ func AddCertAuth(name string, ca []byte, fs file.Filesystem) (*ingress.SSLCert,
return nil, fmt.Errorf("could not write CA file %v: %v", caFileName, err) return nil, fmt.Errorf("could not write CA file %v: %v", caFileName, err)
} }
glog.V(3).Infof("Created CA Certificate for Authentication: %v", caFileName) klog.V(3).Infof("Created CA Certificate for Authentication: %v", caFileName)
return &ingress.SSLCert{ return &ingress.SSLCert{
Certificate: pemCert, Certificate: pemCert,
CAFileName: caFileName, CAFileName: caFileName,
@ -382,7 +382,7 @@ func AddOrUpdateDHParam(name string, dh []byte, fs file.Filesystem) (string, err
tempPemFile, err := fs.TempFile(file.DefaultSSLDirectory, pemName) tempPemFile, err := fs.TempFile(file.DefaultSSLDirectory, pemName)
glog.V(3).Infof("Creating temp file %v for DH param: %v", tempPemFile.Name(), pemName) klog.V(3).Infof("Creating temp file %v for DH param: %v", tempPemFile.Name(), pemName)
if err != nil { if err != nil {
return "", fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) return "", fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err)
} }
@ -432,7 +432,7 @@ func GetFakeSSLCert() ([]byte, []byte) {
priv, err = rsa.GenerateKey(rand.Reader, 2048) priv, err = rsa.GenerateKey(rand.Reader, 2048)
if err != nil { if err != nil {
glog.Fatalf("failed to generate fake private key: %s", err) klog.Fatalf("failed to generate fake private key: %s", err)
} }
notBefore := time.Now() notBefore := time.Now()
@ -443,7 +443,7 @@ func GetFakeSSLCert() ([]byte, []byte) {
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil { if err != nil {
glog.Fatalf("failed to generate fake serial number: %s", err) klog.Fatalf("failed to generate fake serial number: %s", err)
} }
template := x509.Certificate{ template := x509.Certificate{
@ -462,7 +462,7 @@ func GetFakeSSLCert() ([]byte, []byte) {
} }
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv) derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv)
if err != nil { if err != nil {
glog.Fatalf("Failed to create fake certificate: %s", err) klog.Fatalf("Failed to create fake certificate: %s", err)
} }
cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})

View file

@ -18,20 +18,20 @@ package ssl
import ( import (
"bytes" "bytes"
"crypto/rsa"
"crypto/x509" "crypto/x509"
"fmt" "fmt"
"testing" "testing"
"time" "time"
certutil "k8s.io/client-go/util/cert" certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/cert/triple"
"k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/file"
) )
// generateRSACerts generates a self signed certificate using a self generated ca // generateRSACerts generates a self signed certificate using a self generated ca
func generateRSACerts(host string) (*triple.KeyPair, *triple.KeyPair, error) { func generateRSACerts(host string) (*keyPair, *keyPair, error) {
ca, err := triple.NewCA("self-sign-ca") ca, err := newCA("self-sign-ca")
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -50,7 +50,7 @@ func generateRSACerts(host string) (*triple.KeyPair, *triple.KeyPair, error) {
return nil, nil, fmt.Errorf("unable to sign the server certificate: %v", err) return nil, nil, fmt.Errorf("unable to sign the server certificate: %v", err)
} }
return &triple.KeyPair{ return &keyPair{
Key: key, Key: key,
Cert: cert, Cert: cert,
}, ca, nil }, ca, nil
@ -182,3 +182,26 @@ func TestCreateSSLCert(t *testing.T) {
t.Fatalf("expected cname echoheaders but %v returned", ngxCert.CN[0]) t.Fatalf("expected cname echoheaders but %v returned", ngxCert.CN[0])
} }
} }
type keyPair struct {
Key *rsa.PrivateKey
Cert *x509.Certificate
}
func newCA(name string) (*keyPair, error) {
key, err := certutil.NewPrivateKey()
if err != nil {
return nil, fmt.Errorf("unable to create a private key for a new CA: %v", err)
}
config := certutil.Config{
CommonName: name,
}
cert, err := certutil.NewSelfSignedCACert(config, key)
if err != nil {
return nil, fmt.Errorf("unable to create a self-signed certificate for a new CA: %v", err)
}
return &keyPair{
Key: key,
Cert: cert,
}, nil
}

View file

@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/golang/glog" "k8s.io/klog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
@ -75,7 +75,7 @@ func (t *Queue) EnqueueSkippableTask(obj interface{}) {
// enqueue enqueues ns/name of the given api object in the task queue. // enqueue enqueues ns/name of the given api object in the task queue.
func (t *Queue) enqueue(obj interface{}, skippable bool) { func (t *Queue) enqueue(obj interface{}, skippable bool) {
if t.IsShuttingDown() { if t.IsShuttingDown() {
glog.Errorf("queue has been shutdown, failed to enqueue: %v", obj) klog.Errorf("queue has been shutdown, failed to enqueue: %v", obj)
return return
} }
@ -84,10 +84,10 @@ func (t *Queue) enqueue(obj interface{}, skippable bool) {
// make sure the timestamp is bigger than lastSync // make sure the timestamp is bigger than lastSync
ts = time.Now().Add(24 * time.Hour).UnixNano() ts = time.Now().Add(24 * time.Hour).UnixNano()
} }
glog.V(3).Infof("queuing item %v", obj) klog.V(3).Infof("queuing item %v", obj)
key, err := t.fn(obj) key, err := t.fn(obj)
if err != nil { if err != nil {
glog.Errorf("%v", err) klog.Errorf("%v", err)
return return
} }
t.queue.Add(Element{ t.queue.Add(Element{
@ -119,15 +119,15 @@ func (t *Queue) worker() {
item := key.(Element) item := key.(Element)
if t.lastSync > item.Timestamp { if t.lastSync > item.Timestamp {
glog.V(3).Infof("skipping %v sync (%v > %v)", item.Key, t.lastSync, item.Timestamp) klog.V(3).Infof("skipping %v sync (%v > %v)", item.Key, t.lastSync, item.Timestamp)
t.queue.Forget(key) t.queue.Forget(key)
t.queue.Done(key) t.queue.Done(key)
continue continue
} }
glog.V(3).Infof("syncing %v", item.Key) klog.V(3).Infof("syncing %v", item.Key)
if err := t.sync(key); err != nil { if err := t.sync(key); err != nil {
glog.Warningf("requeuing %v, err %v", item.Key, err) klog.Warningf("requeuing %v, err %v", item.Key, err)
t.queue.AddRateLimited(Element{ t.queue.AddRateLimited(Element{
Key: item.Key, Key: item.Key,
Timestamp: time.Now().UnixNano(), Timestamp: time.Now().UnixNano(),

View file

@ -19,8 +19,8 @@ package e2e
import ( import (
"testing" "testing"
"github.com/golang/glog"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"k8s.io/ingress-nginx/test/e2e/framework" "k8s.io/ingress-nginx/test/e2e/framework"
) )
@ -29,7 +29,7 @@ func init() {
framework.RegisterParseFlags() framework.RegisterParseFlags()
if "" == framework.TestContext.KubeConfig { if "" == framework.TestContext.KubeConfig {
glog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar) klog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar)
} }
} }
func TestE2E(t *testing.T) { func TestE2E(t *testing.T) {

View file

@ -30,8 +30,8 @@ import (
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest" restclient "k8s.io/client-go/rest"
"github.com/golang/glog"
"github.com/pkg/errors" "github.com/pkg/errors"
"k8s.io/klog"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -235,8 +235,8 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b
var match bool var match bool
errs := InterceptGomegaFailures(func() { errs := InterceptGomegaFailures(func() {
if glog.V(10) && len(o) > 0 { if klog.V(10) && len(o) > 0 {
glog.Infof("nginx.conf:\n%v", o) klog.Infof("nginx.conf:\n%v", o)
} }
// passes the nginx config to the passed function // passes the nginx config to the passed function
@ -250,7 +250,7 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b
} }
if len(errs) > 0 { if len(errs) > 0 {
glog.V(2).Infof("Errors waiting for conditions: %v", errs) klog.V(2).Infof("Errors waiting for conditions: %v", errs)
} }
return false, nil return false, nil
@ -329,7 +329,7 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
} }
if *deployment.Spec.Replicas != int32(replicas) { if *deployment.Spec.Replicas != int32(replicas) {
glog.Infof("updating replica count from %v to %v...", *deployment.Spec.Replicas, replicas) klog.Infof("updating replica count from %v to %v...", *deployment.Spec.Replicas, replicas)
deployment, err := kubeClientSet.AppsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{}) deployment, err := kubeClientSet.AppsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err

View file

@ -1,25 +0,0 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d
- tar xvf keys.tar
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
GCLOUD_TESTS_GOLANG_KEYRING="projects/dulcet-port-762/locations/us/keyRings/go-integration-test"
GCLOUD_TESTS_GOLANG_ENABLE_REPLAY=yes
travis_wait ./run-tests.sh $TRAVIS_COMMIT
env:
matrix:
# The GCLOUD_TESTS_API_KEY environment variable.
secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI=

103
vendor/cloud.google.com/go/CHANGES.md generated vendored
View file

@ -1,5 +1,108 @@
# Changes # Changes
## v0.33.1
all: release v0.33.1
- compute: Removes an erroneously added go.mod.
- logging: Populate source location in fromLogEntry.
## v0.33.0
- bttest:
- Add support for apply_label_transformer.
- expr:
- Add expr library.
- firestore:
- Support retrieval of missing documents.
- kms:
- Add IAM methods.
- pubsub:
- Clarify extension documentation.
- scheduler:
- Add v1beta1 client.
- vision:
- Add product search helper.
- Add new product search client.
## v0.32.0
Note: This release is the last to support Go 1.6 and 1.8.
- bigquery:
- Add support for removing an expiration.
- Ignore NeverExpire in Table.Create.
- Validate table expiration time.
- cbt:
- Add note about not supporting arbitrary bytes.
- datastore:
- Align key checks.
- firestore:
- Return an error when using Start/End without providing values.
- pubsub:
- Add pstest Close method.
- Clarify MaxExtension documentation.
- securitycenter:
- Add v1beta1 client.
- spanner:
- Allow nil in mutations.
- Improve doc of SessionPoolConfig.MaxOpened.
- Increase session deletion timeout from 5s to 15s.
## v0.31.0
- bigtable:
- Group mutations across multiple requests.
- bigquery:
- Link to bigquery troubleshooting errors page in bigquery.Error comment.
- cbt:
- Fix go generate command.
- Document usage of both maxage + maxversions.
- datastore:
- Passing nil keys results in ErrInvalidKey.
- firestore:
- Clarify what Document.DataTo does with untouched struct fields.
- profile:
- Validate service name in agent.
- pubsub:
- Fix deadlock with pstest and ctx.Cancel.
- Fix a possible deadlock in pstest.
- trace:
- Update doc URL with new fragment.
Special thanks to @fastest963 for going above and beyond helping us to debug
hard-to-reproduce Pub/Sub issues.
## v0.30.0
- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information.
- bigtable: bttest supports row sample filter.
- functions: metadata package added for accessing Cloud Functions resource metadata.
## v0.29.0
- bigtable:
- Add retry to all idempotent RPCs.
- cbt supports complex GC policies.
- Emulator supports arbitrary bytes in regex filters.
- firestore: Add ArrayUnion and ArrayRemove.
- logging: Add the ContextFunc option to supply the context used for
asynchronous RPCs.
- profiler: Ignore NotDefinedError when fetching the instance name
- pubsub:
- BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled.
- BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning.
- Fix deadlock.
- Restore Ack/Nack/Modacks metrics.
- Improve context handling in iterator.
- Implement synchronous mode for Receive.
- pstest: add Pull.
- spanner: Add a metric for the number of sessions currently opened.
- storage:
- Canceling the context releases all resources.
- Add additional RetentionPolicy attributes.
- vision/apiv1: Add LocalizeObjects method.
## v0.28.0 ## v0.28.0
- bigtable: - bigtable:

View file

@ -1,117 +1,177 @@
# Contributing # Contributing
1. Sign one of the contributor license agreements below. 1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. 1. `go get golang.org/x/review/git-codereview` to install the code reviewing
tool.
1. You will need to ensure that your `GOBIN` directory (by default 1. You will need to ensure that your `GOBIN` directory (by default
`$GOPATH/bin`) is in your `PATH` so that git can find the command. `$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. If you would like, you may want to set up aliases for git-codereview, 1. If you would like, you may want to set up aliases for git-codereview,
such that `git codereview change` becomes `git change`. See the such that `git codereview change` becomes `git change`. See the
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
1. Should you run into issues with the git-codereview tool, please note 1. Should you run into issues with the git-codereview tool, please note
that all error messages will assume that you have set up these that all error messages will assume that you have set up these aliases.
aliases.
1. Get the cloud package by running `go get -d cloud.google.com/go`. 1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git 1. If you have already checked out the source, make sure that the remote
origin is https://code.googlesource.com/gocloud: git origin is https://code.googlesource.com/gocloud:
```
git remote set-url origin https://code.googlesource.com/gocloud
```
git remote set-url origin https://code.googlesource.com/gocloud
1. Make sure your auth is configured correctly by visiting 1. Make sure your auth is configured correctly by visiting
https://code.googlesource.com, clicking "Generate Password", and following https://code.googlesource.com, clicking "Generate Password", and following the
the directions. directions.
1. Make changes and create a change by running `git codereview change <name>`, 1. Make changes and create a change by running `git codereview change <name>`,
provide a commit message, and use `git codereview mail` to create a Gerrit CL. provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending to the change with `git codereview change` and mail as your receive 1. Keep amending to the change with `git codereview change` and mail as your
feedback. Each new mailed amendment will create a new patch set for your change in Gerrit. receive feedback. Each new mailed amendment will create a new patch set for
your change in Gerrit.
## Integration Tests ## Integration Tests
In addition to the unit tests, you may run the integration test suite. In addition to the unit tests, you may run the integration test suite. These
directions describe setting up your environment to run integration tests for
_all_ packages: note that many of these instructions may be redundant if you
intend only to run integration tests on a single package.
To run the integrations tests, creating and configuration of a project in the #### GCP Setup
Google Developers Console is required.
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). To run the integrations tests, creation and configuration of two projects in
Ensure the project-level **Owner** the Google Developers Console is required: one specifically for Firestore
[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the integration tests, and another for all other integration tests. We'll refer to
service account. Alternatively, the account can be granted all of the following roles: these projects as "general project" and "Firestore project".
- **Editor**
- **Logs Configuration Writer**
- **PubSub Admin**
Once you create a project, set the following environment variables to be able to After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
run the against the actual APIs. for each project. Ensure the project-level **Owner**
[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to
each service account. During the creation of the service account, you should
download the JSON credential file for use later.
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) Next, ensure the following APIs are enabled in the general project:
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
Some packages require additional environment variables to be set: - BigQuery API
- BigQuery Data Transfer API
- Cloud Dataproc API
- Cloud Dataproc Control API Private
- Cloud Datastore API
- Cloud Firestore API
- Cloud Key Management Service (KMS) API
- Cloud Natural Language API
- Cloud OS Login API
- Cloud Pub/Sub API
- Cloud Resource Manager API
- Cloud Spanner API
- Cloud Speech API
- Cloud Translation API
- Cloud Video Intelligence API
- Cloud Vision API
- Compute Engine API
- Compute Engine Instance Group Manager API
- Container Registry API
- Firebase Rules API
- Google Cloud APIs
- Google Cloud Deployment Manager V2 API
- Google Cloud SQL
- Google Cloud Storage
- Google Cloud Storage JSON API
- Google Compute Engine Instance Group Updater API
- Google Compute Engine Instance Groups API
- Kubernetes Engine API
- Stackdriver Error Reporting API
- firestore Next, create a Datastore database in the general project, and a Firestore
- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: project ID for Firestore. database in the Firestore project.
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
- storage
- **GCLOUD_TESTS_GOLANG_KEYRING**: The full name of the keyring for the tests, in the
form "projects/P/locations/L/keyRings/R".
- translate
- **GCLOUD_TESTS_API_KEY**: API key for using the Translate API.
- profiler
- **GCLOUD_TESTS_GOLANG_ZONE**: Compute Engine zone.
Some packages can record the RPCs during integration tests to a file for Finally, in the general project, create an API key for the translate API:
subsequent replay. To record, pass the `-record` flag to `go test`. The
recording will be saved to the _package_`.replay` file. To replay integration
tests from a saved recording, the replay file must be present, the `-short` flag
must be passed to `go test`, and the **GCLOUD_TESTS_GOLANG_ENABLE_REPLAY**
environment variable must have a non-empty value.
Install the [gcloud command-line tool][gcloudcli] to your machine and use it - Go to GCP Developer Console.
to create some resources used in integration tests. - Navigate to APIs & Services > Credentials.
- Click Create Credentials > API Key.
- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
#### Local Setup
Once the two projects are created and configured, set the following environment
variables:
- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
bamboo-shift-455) for the general project.
- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
project's service account.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
(e.g. doorway-cliff-677) for the Firestore project.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
Firestore project's service account.
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
in the form
"projects/P/locations/L/keyRings/R". The creation of this is described below.
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
create some resources used in integration tests.
From the project's root directory: From the project's root directory:
``` sh ``` sh
# Set the default project in your env. # Sets the default project in your env.
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
# Authenticate the gcloud tool with your account. # Authenticates the gcloud tool with your account.
$ gcloud auth login $ gcloud auth login
# Create the indexes used in the datastore integration tests. # Create the indexes used in the datastore integration tests.
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml $ gcloud datastore create-indexes datastore/testdata/index.yaml
# Create a Google Cloud storage bucket with the same name as your test project, # Creates a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink # and with the Stackdriver Logging service account as owner, for the sink
# integration tests in logging. # integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
# Create a PubSub topic for integration tests of storage notifications. # Creates a PubSub topic for integration tests of storage notifications.
$ gcloud beta pubsub topics create go-storage-notification-test $ gcloud beta pubsub topics create go-storage-notification-test
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
# "service-<numeric project id>@gs-project-accounts.iam.gserviceaccount.com"
# as a publisher to that topic.
# Create a Spanner instance for the spanner integration tests. # Creates a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' $ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete # NOTE: Spanner instances are priced by the node-hour, so you may want to
# the instance after testing with 'gcloud beta spanner instances delete'. # delete the instance after testing with 'gcloud beta spanner instances delete'.
# For Storage integration tests: $ export MY_KEYRING=some-keyring-name
# Enable KMS for your project in the Cloud Console. $ export MY_LOCATION=global
# Create a KMS keyring, in the same location as the default location for your project's buckets. # Creates a KMS keyring, in the same location as the default location for your
$ gcloud kms keyrings create MY_KEYRING --location MY_LOCATION # project's buckets.
# Create two keys in the keyring, named key1 and key2. $ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
$ gcloud kms keys create key1 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption # Creates two keys in the keyring, named key1 and key2.
$ gcloud kms keys create key2 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption $ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
# As mentioned above, set the GCLOUD_TESTS_GOLANG_KEYRING environment variable. $ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/MY_LOCATION/keyRings/MY_KEYRING # Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
# Authorize Google Cloud Storage to encrypt and decrypt using key1. $ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
``` ```
Once you've done the necessary setup, you can run the integration tests by running: #### Running
Once you've done the necessary setup, you can run the integration tests by
running:
``` sh ``` sh
$ go test -v cloud.google.com/go/... $ go test -v cloud.google.com/go/...
``` ```
#### Replay
Some packages can record the RPCs during integration tests to a file for
subsequent replay. To record, pass the `-record` flag to `go test`. The
recording will be saved to the _package_`.replay` file. To replay integration
tests from a saved recording, the replay file must be present, the `-short`
flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
environment variable must have a non-empty value.
## Contributor License Agreements ## Contributor License Agreements
Before we can accept your pull requests you'll need to sign a Contributor Before we can accept your pull requests you'll need to sign a Contributor

View file

@ -54,6 +54,7 @@ Google API | Status | Package
[Asset][cloud-asset] | alpha | [`godoc.org/cloud.google.com/go/asset/v1beta`][cloud-asset-ref] [Asset][cloud-asset] | alpha | [`godoc.org/cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
[BigQuery][cloud-bigquery] | stable | [`godoc.org/cloud.google.com/go/bigquery`][cloud-bigquery-ref] [BigQuery][cloud-bigquery] | stable | [`godoc.org/cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Bigtable][cloud-bigtable] | stable | [`godoc.org/cloud.google.com/go/bigtable`][cloud-bigtable-ref] [Bigtable][cloud-bigtable] | stable | [`godoc.org/cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[Cloudtasks][cloud-tasks] | beta | [`godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref]
[Container][cloud-container] | stable | [`godoc.org/cloud.google.com/go/container/apiv1`][cloud-container-ref] [Container][cloud-container] | stable | [`godoc.org/cloud.google.com/go/container/apiv1`][cloud-container-ref]
[ContainerAnalysis][cloud-containeranalysis] | beta | [`godoc.org/cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref] [ContainerAnalysis][cloud-containeranalysis] | beta | [`godoc.org/cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
[Dataproc][cloud-dataproc] | stable | [`godoc.org/cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref] [Dataproc][cloud-dataproc] | stable | [`godoc.org/cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
@ -501,3 +502,6 @@ for more information.
[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory [cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory [cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1 [cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1
[cloud-tasks]: https://cloud.google.com/tasks/
[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3

View file

@ -1,13 +1,47 @@
# How to Release this Repo # How to Create a New Release
1. Determine the current release version with `git tag -l`. It should look ## Prerequisites
something like `vX.Y.Z`. We'll call the current
version `$CV` and the new version `$NV`. Install [releasetool](https://github.com/googleapis/releasetool).
1. On master, run `git log $CV..` to list all the changes since the last
release. ## Create a release
1. Edit `CHANGES.md` to include a summary of the changes.
1. Mail the CL containing the `CHANGES.md` changes. When the CL is approved, submit it. 1. `cd` into the root directory, e.g., `~/go/src/cloud.google.com/go`
1. Without submitting any other CLs: 1. Checkout the master branch and ensure a clean and up-to-date state.
a. Switch to master. ```
b. Tag the repo with the next version: `git tag $NV`. git checkout master
c. Push the tag: `git push origin $NV`. git pull --tags origin master
```
1. Run releasetool to generate a changelog from the last version. Note,
releasetool will prompt if the new version is a major, minor, or patch
version.
```
releasetool start --language go
```
1. Format the output to match CHANGES.md.
1. Submit a CL with the changes in CHANGES.md. The commit message should look
like this (where `v0.31.0` is instead the correct version number):
```
all: Release v0.31.0
```
1. Wait for approval from all reviewers and then submit the CL.
1. Return to the master branch and pull the release commit.
```
git checkout master
git pull origin master
```
1. Tag the current commit with the new version (e.g., `v0.31.0`)
```
releasetool tag --language go
```
1. Publish the tag to GoogleSource (i.e., origin):
```
git push origin $NEW_VERSION
```
1. Visit the [releases page][releases] on GitHub and click the "Draft a new
release" button. For tag version, enter the tag published in the previous
step. For the release title, use the version (e.g., `v0.31.0`). For the
description, copy the changes added to CHANGES.md.
[releases]: https://github.com/GoogleCloudPlatform/google-cloud-go/releases

View file

@ -20,6 +20,7 @@
package metadata // import "cloud.google.com/go/compute/metadata" package metadata // import "cloud.google.com/go/compute/metadata"
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -31,9 +32,6 @@ import (
"strings" "strings"
"sync" "sync"
"time" "time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
) )
const ( const (
@ -143,7 +141,7 @@ func testOnGCE() bool {
go func() { go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent) req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, defaultClient.hc, req) res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil { if err != nil {
resc <- false resc <- false
return return

View file

@ -23,6 +23,7 @@
set -ex set -ex
APIS=( APIS=(
google/api/expr/artman_cel.yaml
google/iam/artman_iam_admin.yaml google/iam/artman_iam_admin.yaml
google/cloud/asset/artman_cloudasset_v1beta1.yaml google/cloud/asset/artman_cloudasset_v1beta1.yaml
google/iam/credentials/artman_iamcredentials_v1.yaml google/iam/credentials/artman_iamcredentials_v1.yaml
@ -37,6 +38,8 @@ google/cloud/oslogin/artman_oslogin_v1.yaml
google/cloud/oslogin/artman_oslogin_v1beta.yaml google/cloud/oslogin/artman_oslogin_v1beta.yaml
google/cloud/redis/artman_redis_v1beta1.yaml google/cloud/redis/artman_redis_v1beta1.yaml
google/cloud/redis/artman_redis_v1.yaml google/cloud/redis/artman_redis_v1.yaml
google/cloud/scheduler/artman_cloudscheduler_v1beta1.yaml
google/cloud/securitycenter/artman_securitycenter_v1beta1.yaml
google/cloud/speech/artman_speech_v1.yaml google/cloud/speech/artman_speech_v1.yaml
google/cloud/speech/artman_speech_v1p1beta1.yaml google/cloud/speech/artman_speech_v1p1beta1.yaml
google/cloud/tasks/artman_cloudtasks_v2beta2.yaml google/cloud/tasks/artman_cloudtasks_v2beta2.yaml
@ -47,7 +50,6 @@ google/cloud/videointelligence/artman_videointelligence_v1beta1.yaml
google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml
google/cloud/vision/artman_vision_v1.yaml google/cloud/vision/artman_vision_v1.yaml
google/cloud/vision/artman_vision_v1p1beta1.yaml google/cloud/vision/artman_vision_v1p1beta1.yaml
google/container/artman_container.yaml
google/devtools/artman_clouddebugger.yaml google/devtools/artman_clouddebugger.yaml
google/devtools/clouderrorreporting/artman_errorreporting.yaml google/devtools/clouderrorreporting/artman_errorreporting.yaml
google/devtools/cloudtrace/artman_cloudtrace_v1.yaml google/devtools/cloudtrace/artman_cloudtrace_v1.yaml
@ -70,6 +72,13 @@ for api in "${APIS[@]}"; do
cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/ cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/
done done
# NOTE(pongad): `sed -i` doesn't work on Macs, because -i option needs an argument.
# `-i ''` doesn't work on GNU, since the empty string is treated as a file name.
# So we just create the backup and delete it after.
ver=$(date +%Y%m%d)
find $GOPATH/src/cloud.google.com/go/ -name '*.go' -exec sed -i.backup -e "s/^const versionClient.*/const versionClient = \"$ver\"/" '{}' +
find $GOPATH/src/cloud.google.com/go/ -name '*.backup' -delete
#go list cloud.google.com/go/... | grep apiv | xargs go test #go list cloud.google.com/go/... | grep apiv | xargs go test
#go test -short cloud.google.com/go/... #go test -short cloud.google.com/go/...

View file

@ -0,0 +1,17 @@
language: go
go:
- 1.10.x
go_import_path: contrib.go.opencensus.io/exporter/ocagent
before_script:
- GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
- PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
script:
- go build ./... # Ensure dependency updates don't break build
- if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
- go vet ./...
- go test -v -race $PKGS # Run all the tests with the race detector enabled
- 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'

View file

@ -0,0 +1,24 @@
# How to contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult [GitHub Help] for more
information on using pull requests.
[GitHub Help]: https://help.github.com/articles/about-pull-requests/

View file

@ -1,7 +1,6 @@
Apache License Apache License
Version 2.0, January 2004 Version 2.0, January 2004
https://www.apache.org/licenses/ http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@ -176,13 +175,24 @@
END OF TERMS AND CONDITIONS END OF TERMS AND CONDITIONS
Copyright 2016 Docker, Inc. APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,

View file

@ -0,0 +1,61 @@
# OpenCensus Agent Go Exporter
[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
OC-Agent is a daemon process running in a VM that can retrieve spans/stats/metrics from
OpenCensus Library, export them to other backends and possibly push configurations back to
Library. See more details on [OC-Agent Readme][OCAgentReadme].
Note: This is an experimental repository and is likely to get backwards-incompatible changes.
Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo].
## Installation
```bash
$ go get -u contrib.go.opencensus.io/exporter/ocagent/v1
```
## Usage
```go
import (
"context"
"fmt"
"log"
"time"
"contrib.go.opencensus.io/exporter/ocagent/v1"
"go.opencensus.io/trace"
)
func Example() {
exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
if err != nil {
log.Fatalf("Failed to create the agent exporter: %v", err)
}
defer exp.Stop()
// Now register it as a trace exporter.
trace.RegisterExporter(exp)
// Then use the OpenCensus tracing library, like we normally would.
ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
defer span.End()
for i := 0; i < 10; i++ {
_, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
<-time.After(6 * time.Millisecond)
iSpan.End()
}
}
```
[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent

View file

@ -0,0 +1,38 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"math/rand"
"time"
)
var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
// retries function fn upto n times, if fn returns an error lest it returns nil early.
// It applies exponential backoff in units of (1<<n) + jitter microsends.
func nTriesWithExponentialBackoff(nTries int64, timeBaseUnit time.Duration, fn func() error) (err error) {
for i := int64(0); i < nTries; i++ {
err = fn()
if err == nil {
return nil
}
// Backoff for a time period with a pseudo-random jitter
jitter := time.Duration(randSrc.Float64()*100) * time.Microsecond
ts := jitter + ((1 << uint64(i)) * timeBaseUnit)
<-time.After(ts)
}
return err
}

View file

@ -0,0 +1,9 @@
module contrib.go.opencensus.io/exporter/ocagent
require (
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a
github.com/golang/protobuf v1.2.0
go.opencensus.io v0.17.0
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf
google.golang.org/grpc v1.15.0
)

View file

@ -0,0 +1,44 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/census-instrumentation/opencensus-proto v0.0.1 h1:4v5I+ax5jCmwTYVaWQacX8ZSxvUZemBX4UwBGSkDeoA=
github.com/census-instrumentation/opencensus-proto v0.0.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a h1:t88pXOTS5K+pjfuhTOcul6sdC4khgqB8ukyfbe62Zxo=
github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
go.opencensus.io v0.17.0 h1:2Cu88MYg+1LU+WVD+NWwYhyP0kKgRlN9QjWGaX0jKTE=
go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.15.0 h1:Az/KuahOM4NAidTEuJCv/RonAA7rYsTPkqXVjr+8OOw=
google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View file

@ -0,0 +1,41 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"os"
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
"go.opencensus.io"
)
// createNodeInfo builds the Node identifier proto sent to the OpenCensus
// agent in the first message of each stream. It identifies this process
// (hostname, PID, start time) and the exporting library versions.
func createNodeInfo(nodeName string) *commonpb.Node {
	// Prefer the HOSTNAME environment variable (commonly set by shells and
	// container runtimes), but fall back to the OS-reported hostname so the
	// field is not left empty where HOSTNAME is unset (e.g. Windows).
	hostName := os.Getenv("HOSTNAME")
	if hostName == "" {
		hostName, _ = os.Hostname() // best effort; empty on error is acceptable
	}
	return &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName:       hostName,
			Pid:            uint32(os.Getpid()),
			StartTimestamp: timeToTimestamp(startTime),
		},
		LibraryInfo: &commonpb.LibraryInfo{
			Language:           commonpb.LibraryInfo_GO_LANG,
			ExporterVersion:    Version,
			CoreLibraryVersion: opencensus.Version(),
		},
		ServiceInfo: &commonpb.ServiceInfo{
			Name: nodeName,
		},
		Attributes: make(map[string]string),
	}
}

View file

@ -0,0 +1,299 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"context"
"errors"
"fmt"
"sync"
"time"
"google.golang.org/api/support/bundler"
"google.golang.org/grpc"
"go.opencensus.io/trace"
agentcommonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
)
// startTime records when this package was initialized; it is reported to
// the agent as the process start timestamp (see createNodeInfo).
var startupMu sync.Mutex
var startTime time.Time

func init() {
	// NOTE(review): init runs single-threaded before main, so the mutex is
	// not strictly required here; kept as defensive style.
	startupMu.Lock()
	startTime = time.Now()
	startupMu.Unlock()
}

// Compile-time check that *Exporter satisfies trace.Exporter.
var _ trace.Exporter = (*Exporter)(nil)

// Exporter is an OpenCensus trace exporter that batches spans and uploads
// them to an OpenCensus agent over gRPC. Create one with NewExporter or
// NewUnstartedExporter followed by Start.
type Exporter struct {
	// mu protects the non-atomic and non-channel variables
	mu sync.RWMutex
	// started/stopped track the exporter lifecycle (guarded by mu).
	started bool
	stopped bool
	agentPort uint16
	agentAddress string
	serviceName string
	canDialInsecure bool
	traceSvcClient agenttracepb.TraceServiceClient
	traceExporter agenttracepb.TraceService_ExportClient
	nodeInfo *agentcommonpb.Node
	grpcClientConn *grpc.ClientConn
	// traceBundler batches spans before upload (see NewUnstartedExporter).
	traceBundler *bundler.Bundler
}
// NewExporter constructs an Exporter with the given options and starts it,
// returning either a ready-to-use exporter or the first error encountered
// during construction or startup.
func NewExporter(opts ...ExporterOption) (*Exporter, error) {
	exporter, err := NewUnstartedExporter(opts...)
	if err != nil {
		return nil, err
	}
	if startErr := exporter.Start(); startErr != nil {
		return nil, startErr
	}
	return exporter, nil
}
// spanDataBufferSize is the bundler's count threshold: buffered spans are
// uploaded once this many accumulate (or after DelayThreshold elapses).
const spanDataBufferSize = 300

// NewUnstartedExporter constructs an Exporter with the given options applied
// but does not dial the agent; call Start before exporting spans.
func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) {
	e := new(Exporter)
	for _, opt := range opts {
		opt.withExporter(e)
	}
	// agentPort is unsigned, so "<= 0" could only ever mean "== 0"; test
	// the zero value explicitly.
	if e.agentPort == 0 {
		e.agentPort = DefaultAgentPort
	}
	traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
		e.uploadTraces(bundle.([]*trace.SpanData))
	})
	traceBundler.DelayThreshold = 2 * time.Second
	traceBundler.BundleCountThreshold = spanDataBufferSize
	e.traceBundler = traceBundler
	e.nodeInfo = createNodeInfo(e.serviceName)
	return e, nil
}
const (
	maxInitialConfigRetries = 10
	maxInitialTracesRetries = 10
)

// Start dials to the agent, establishing a connection to it. It also
// initiates the Config and Trace services by sending over the initial
// messages that consist of the node identifier. Start performs a best case
// attempt to try to send the initial messages, by applying exponential
// backoff at most 10 times.
func (ae *Exporter) Start() error {
	ae.mu.Lock()
	defer ae.mu.Unlock()

	if err := ae.doStartLocked(); err != nil {
		// Startup failed: release any gRPC connection that may already have
		// been opened so resources are not leaked, then surface the error.
		ae.started = false
		if ae.grpcClientConn != nil {
			ae.grpcClientConn.Close()
		}
		return err
	}
	ae.started = true
	return nil
}
// prepareAgentAddress returns the "host:port" endpoint the exporter should
// dial: an explicitly configured address wins outright; otherwise the
// default host is combined with the configured (or default) port.
func (ae *Exporter) prepareAgentAddress() string {
	if addr := ae.agentAddress; addr != "" {
		return addr
	}
	port := ae.agentPort
	if port == 0 {
		port = DefaultAgentPort
	}
	return fmt.Sprintf("%s:%d", DefaultAgentHost, port)
}
// doStartLocked dials the agent, then initiates the Trace and Config
// streams by sending the node identifier on each, retrying the initial
// sends with exponential backoff. It must be called with ae.mu held.
func (ae *Exporter) doStartLocked() error {
	if ae.started {
		return nil
	}

	// Now start it
	cc, err := ae.dialToAgent()
	if err != nil {
		return err
	}
	ae.grpcClientConn = cc

	// Initiate the trace service by sending over node identifier info.
	traceSvcClient := agenttracepb.NewTraceServiceClient(cc)
	traceExporter, err := traceSvcClient.Export(context.Background())
	if err != nil {
		return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
	}
	firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{Node: ae.nodeInfo}
	// NOTE(review): 200µs looks unusually short for a backoff base period;
	// presumably 200ms was intended — confirm before changing.
	err = nTriesWithExponentialBackoff(maxInitialTracesRetries, 200*time.Microsecond, func() error {
		return traceExporter.Send(firstTraceMessage)
	})
	if err != nil {
		// Bug fix: this failure belongs to the Trace service; the previous
		// message was a copy-paste of the Config-service error below.
		return fmt.Errorf("Exporter.Start:: Failed to initiate the Trace service: %v", err)
	}
	ae.traceExporter = traceExporter

	// Initiate the config service by sending over node identifier info.
	configStream, err := traceSvcClient.Config(context.Background())
	if err != nil {
		return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
	}
	firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: ae.nodeInfo}
	err = nTriesWithExponentialBackoff(maxInitialConfigRetries, 200*time.Microsecond, func() error {
		return configStream.Send(firstCfgMessage)
	})
	if err != nil {
		return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
	}

	// In the background, handle trace configurations that are beamed down
	// by the agent, but also reply to it with the applied configuration.
	go ae.handleConfigStreaming(configStream)
	return nil
}
// dialToAgent performs a best case attempt to dial to the agent.
// It retries failed dials with:
// * gRPC dialTimeout of 1s
// * exponential backoff, 5 times with a period of 50ms
// hence in the worst case of (no agent actually available), it
// will take at least:
// (5 * 1s) + ((1<<5)-1) * 0.05 s = 5s + 1.55s = 6.55s
// (the backoff base period is 50ms = 0.05s, doubled on each retry).
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
	addr := ae.prepareAgentAddress()
	dialOpts := []grpc.DialOption{grpc.WithBlock()}
	if ae.canDialInsecure {
		// Plaintext connection; see WithInsecure.
		dialOpts = append(dialOpts, grpc.WithInsecure())
	}
	var cc *grpc.ClientConn
	// NOTE(review): grpc.WithTimeout is deprecated in newer gRPC releases in
	// favor of grpc.DialContext with a context deadline — confirm before migrating.
	dialOpts = append(dialOpts, grpc.WithTimeout(1*time.Second))
	dialBackoffWaitPeriod := 50 * time.Millisecond
	err := nTriesWithExponentialBackoff(5, dialBackoffWaitPeriod, func() error {
		var err error
		cc, err = grpc.Dial(addr, dialOpts...)
		return err
	})
	return cc, err
}
// handleConfigStreaming runs for the lifetime of the config stream: it
// receives sampling configuration pushed down by the agent, applies it to
// the global trace configuration, and echoes the applied configuration back
// upstream. It returns the first receive or send error encountered.
func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
	for {
		recv, err := configStream.Recv()
		if err != nil {
			// TODO: Check if this is a transient error or exponential backoff-able.
			return err
		}
		cfg := recv.Config
		if cfg == nil {
			continue
		}

		// Apply whichever sampler the agent sent down.
		switch {
		case cfg.GetProbabilitySampler() != nil:
			prob := cfg.GetProbabilitySampler().SamplingProbability
			trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(prob)})
		case cfg.GetConstantSampler() != nil:
			if cfg.GetConstantSampler().Decision {
				trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
			} else {
				trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
			}
		default:
			// TODO: Add the rate limiting sampler here
		}

		// Then finally send back to upstream the newly applied configuration
		if err := configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}); err != nil {
			return err
		}
	}
}
var (
	// errNotStarted is returned by Stop when the exporter was never started.
	errNotStarted = errors.New("not started")
)

// Stop shuts down all the connections and resources
// related to the exporter. It flushes buffered spans first and returns any
// error from closing the underlying gRPC connection.
func (ae *Exporter) Stop() error {
	ae.mu.Lock()
	defer ae.mu.Unlock()
	if !ae.started {
		return errNotStarted
	}
	if ae.stopped {
		// TODO: tell the user that we've already stopped, so perhaps a sentinel error?
		return nil
	}
	// Drain buffered spans before tearing the connection down.
	ae.Flush()
	// Now close the underlying gRPC connection.
	var err error
	if ae.grpcClientConn != nil {
		err = ae.grpcClientConn.Close()
	}
	// At this point we can change the state variables: started and stopped
	ae.started = false
	ae.stopped = true
	return err
}
// ExportSpan queues sd for batched upload to the agent; nil spans are
// ignored. It implements trace.Exporter.
func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
	if sd == nil {
		return
	}
	// Size -1 defers sizing to the bundler; the add error (e.g. buffer
	// full) is deliberately dropped — export is best effort.
	_ = ae.traceBundler.Add(sd, -1)
}

// uploadTraces converts a bundle of spans to proto form and sends them on
// the established export stream. Called by the bundler's handler.
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
	if len(sdl) == 0 {
		return
	}
	protoSpans := make([]*tracepb.Span, 0, len(sdl))
	for _, sd := range sdl {
		if sd != nil {
			protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
		}
	}
	if len(protoSpans) > 0 {
		// Send errors are deliberately dropped: the bundler offers no retry
		// channel and export is best effort.
		_ = ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
			Spans: protoSpans,
		})
	}
}

// Flush synchronously uploads any spans currently buffered in the bundler.
func (ae *Exporter) Flush() {
	ae.traceBundler.Flush()
}

View file

@ -0,0 +1,80 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
const (
	// DefaultAgentPort is the port dialed when no port/address is configured.
	DefaultAgentPort uint16 = 55678
	// DefaultAgentHost is the host dialed when no address is configured.
	DefaultAgentHost string = "localhost"
)

// ExporterOption customizes an Exporter; options are applied in order by
// NewUnstartedExporter.
type ExporterOption interface {
	withExporter(e *Exporter)
}

// portSetter is the ExporterOption implementation behind WithPort.
type portSetter uint16

func (ps portSetter) withExporter(e *Exporter) {
	e.agentPort = uint16(ps)
}

var _ ExporterOption = (*portSetter)(nil)
// insecureGrpcConnection is the ExporterOption implementation behind
// WithInsecure.
type insecureGrpcConnection int

var _ ExporterOption = (*insecureGrpcConnection)(nil)

func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
	e.canDialInsecure = true
}

// WithInsecure disables client transport security for the exporter's gRPC connection
// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }

// WithPort allows one to override the port that the exporter will
// connect to the agent on, instead of using DefaultAgentPort.
func WithPort(port uint16) ExporterOption {
	return portSetter(port)
}
// addressSetter is the ExporterOption implementation behind WithAddress.
type addressSetter string

func (as addressSetter) withExporter(e *Exporter) {
	e.agentAddress = string(as)
}

var _ ExporterOption = (*addressSetter)(nil)

// WithAddress allows one to set the address that the exporter will
// connect to the agent on. If unset, it will instead try to use
// connect to DefaultAgentHost:DefaultAgentPort
func WithAddress(addr string) ExporterOption {
	return addressSetter(addr)
}
// serviceNameSetter is the ExporterOption implementation behind
// WithServiceName.
type serviceNameSetter string

func (sns serviceNameSetter) withExporter(e *Exporter) {
	e.serviceName = string(sns)
}

var _ ExporterOption = (*serviceNameSetter)(nil)

// WithServiceName allows one to set/override the service name
// that the exporter will report to the agent.
func WithServiceName(serviceName string) ExporterOption {
	return serviceNameSetter(serviceName)
}

View file

@ -0,0 +1,162 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
import (
"time"
"go.opencensus.io/trace"
"go.opencensus.io/trace/tracestate"
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
"github.com/golang/protobuf/ptypes/timestamp"
)
// ocSpanToProtoSpan converts an OpenCensus SpanData into its wire proto
// representation for transmission to the agent. A nil input yields nil.
func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
	if sd == nil {
		return nil
	}
	// The proto Name field is a pointer; only populate it for non-empty names.
	var namePtr *tracepb.TruncatableString
	if sd.Name != "" {
		namePtr = &tracepb.TruncatableString{Value: sd.Name}
	}
	return &tracepb.Span{
		TraceId: sd.TraceID[:],
		SpanId: sd.SpanID[:],
		ParentSpanId: sd.ParentSpanID[:],
		Status: ocStatusToProtoStatus(sd.Status),
		StartTime: timeToTimestamp(sd.StartTime),
		EndTime: timeToTimestamp(sd.EndTime),
		Links: ocLinksToProtoLinks(sd.Links),
		Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
		Name: namePtr,
		Attributes: ocAttributesToProtoAttributes(sd.Attributes),
		Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
	}
}
// blankStatus is the zero value used to detect an unset span status.
var blankStatus trace.Status

// ocStatusToProtoStatus converts a span status to proto form, returning
// nil when the status is entirely unset (zero value).
func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
	if status == blankStatus {
		return nil
	}
	return &tracepb.Status{
		Code: status.Code,
		Message: status.Message,
	}
}
// ocLinksToProtoLinks converts span links to their proto container form;
// an empty slice yields nil.
func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
	if len(links) == 0 {
		return nil
	}
	sl := make([]*tracepb.Span_Link, 0, len(links))
	for _, ocLink := range links {
		// This redefinition is necessary to prevent ocLink.*ID[:] copies
		// being reused -- in short we need a new ocLink per iteration.
		ocLink := ocLink
		sl = append(sl, &tracepb.Span_Link{
			TraceId: ocLink.TraceID[:],
			SpanId: ocLink.SpanID[:],
			Type: ocLinkTypeToProtoLinkType(ocLink.Type),
		})
	}
	return &tracepb.Span_Links{
		Link: sl,
	}
}

// ocLinkTypeToProtoLinkType maps an OpenCensus link type onto the proto
// enum; unknown values map to TYPE_UNSPECIFIED.
func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
	switch oct {
	case trace.LinkTypeChild:
		return tracepb.Span_Link_CHILD_LINKED_SPAN
	case trace.LinkTypeParent:
		return tracepb.Span_Link_PARENT_LINKED_SPAN
	default:
		return tracepb.Span_Link_TYPE_UNSPECIFIED
	}
}
// ocAttributesToProtoAttributes converts span attributes to their proto
// map form. Only bool, int, int64 and string values are supported;
// attributes of any other type are silently dropped. Empty input yields nil.
func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
	if len(attrs) == 0 {
		return nil
	}
	// Pre-size the map: at most len(attrs) entries survive the type filter.
	outMap := make(map[string]*tracepb.AttributeValue, len(attrs))
	for k, v := range attrs {
		switch v := v.(type) {
		case bool:
			outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
		case int:
			outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
		case int64:
			outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
		case string:
			outMap[k] = &tracepb.AttributeValue{
				Value: &tracepb.AttributeValue_StringValue{
					StringValue: &tracepb.TruncatableString{Value: v},
				},
			}
		}
	}
	return &tracepb.Span_Attributes{
		AttributeMap: outMap,
	}
}
// timeToTimestamp converts t into a protobuf Timestamp by splitting its
// Unix-epoch nanoseconds into whole seconds plus the nanosecond remainder.
func timeToTimestamp(t time.Time) *timestamp.Timestamp {
	ns := t.UnixNano()
	const nanosPerSecond = int64(time.Second) // 1e9
	return &timestamp.Timestamp{
		Seconds: ns / nanosPerSecond,
		Nanos:   int32(ns % nanosPerSecond),
	}
}
// ocSpanKindToProtoSpanKind maps an OpenCensus span kind onto the proto
// enum; unknown kinds map to SPAN_KIND_UNSPECIFIED.
func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
	switch kind {
	case trace.SpanKindClient:
		return tracepb.Span_CLIENT
	case trace.SpanKindServer:
		return tracepb.Span_SERVER
	default:
		return tracepb.Span_SPAN_KIND_UNSPECIFIED
	}
}
// ocTracestateToProtoTracestate converts a tracestate to proto form;
// a nil tracestate yields nil.
func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
	if ts == nil {
		return nil
	}
	return &tracepb.Span_Tracestate{
		Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
	}
}

// ocTracestateEntriesToProtoTracestateEntries converts tracestate entries
// one-for-one into their proto key/value form, preserving order.
func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
	protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
	for _, entry := range entries {
		protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
			Key: entry.Key,
			Value: entry.Value,
		})
	}
	return protoEntries
}

View file

@ -0,0 +1,17 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocagent
// Version is the ocagent exporter version reported to the agent in the
// node's LibraryInfo (see createNodeInfo).
const Version = "0.0.1"

View file

@ -9,6 +9,7 @@ _obj
_test _test
.DS_Store .DS_Store
.idea/ .idea/
.vscode/
# Architecture specific extensions/prefixes # Architecture specific extensions/prefixes
*.[568vq] *.[568vq]

View file

@ -6,8 +6,6 @@ go:
- master - master
- 1.11.x - 1.11.x
- 1.10.x - 1.10.x
- 1.9.x
- 1.8.x
matrix: matrix:
allow_failures: allow_failures:
@ -20,7 +18,7 @@ before_install:
- curl -L -o $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v$DEP_VERSION/dep-linux-amd64 && chmod +x $GOPATH/bin/dep - curl -L -o $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v$DEP_VERSION/dep-linux-amd64 && chmod +x $GOPATH/bin/dep
install: install:
- go get -u github.com/golang/lint/golint - go get -u golang.org/x/lint/golint
- go get -u github.com/stretchr/testify - go get -u github.com/stretchr/testify
- go get -u github.com/GoASTScanner/gas - go get -u github.com/GoASTScanner/gas
- dep ensure - dep ensure
@ -32,4 +30,4 @@ script:
- go vet ./autorest/... - go vet ./autorest/...
- test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)" - test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)"
- go build -v ./autorest/... - go build -v ./autorest/...
- go test -v ./autorest/... - go test -race -v ./autorest/...

View file

@ -1,5 +1,100 @@
# CHANGELOG # CHANGELOG
## v11.2.8
### Bug Fixes
- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
## v11.2.7
### Bug Fixes
- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`.
Note that for backward compatibility reasons, both will work until the next major version release of the package.
## v11.2.6
### Bug Fixes
- If zero bytes are read from a polling response body don't attempt to unmarshal them.
## v11.2.5
### Bug Fixes
- Removed race condition in `autorest.DoRetryForStatusCodes`.
## v11.2.4
### Bug Fixes
- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available.
## v11.2.1
NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
longer work with golint.
### Bug Fixes
- Method `MSIConfig.Authorizer` now supports user-assigned identities.
- The adal package now reports its own user-agent string.
## v11.2.0
### New Features
- Added `tracing` package that enables instrumentation of HTTP and API calls.
Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
will start instrumenting the code for metrics and traces.
Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
App Insights Local Forwarder that needs to be running. Note that if the
AI Local Forwarder is not running tracking will still be enabled.
By default, instrumentation is disabled. Once enabled, instrumentation can also
be programmatically disabled by calling `Disable`.
- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
### Bug Fixes
- Don't use the initial request's context for LRO polling.
- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
it is already set.
## v11.1.1
### Bug Fixes
- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
## v11.1.0
### New Features
- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI.
- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
## v11.0.1
### New Features
- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
## v11.0.0
### Breaking Changes
- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number`
- ExpiresIn
- ExpiresOn
- NotBefore
### New Features
- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
- Setting a client's `PollingDuration` to zero will use the provided context to control a LRO's polling duration.
## v10.15.5 ## v10.15.5
### Bug Fixes ### Bug Fixes
@ -28,21 +123,21 @@
### Bug Fixes ### Bug Fixes
- If an LRO API returns a ```Failed``` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. - If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
- For failed LROs without an OData v4 error include the response body in the error's ```AdditionalInfo``` field to aid in diagnosing the failure. - For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
## v10.15.0 ## v10.15.0
### New Features ### New Features
- Add initial support for request/response logging via setting environment variables. - Add initial support for request/response logging via setting environment variables.
Setting ```AZURE_GO_SDK_LOG_LEVEL``` to ```LogInfo``` will log request/response Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
without their bodies. To include the bodies set the log level to ```LogDebug```. without their bodies. To include the bodies set the log level to `LogDebug`.
By default the logger writes to strerr, however it can also write to stdout or a file By default the logger writes to strerr, however it can also write to stdout or a file
if specified in ```AZURE_GO_SDK_LOG_FILE```. Note that if the specified file if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
already exists it will be truncated. already exists it will be truncated.
IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
headers. Any other secrets will *not* be redacted. headers. Any other secrets will _not_ be redacted.
## v10.14.0 ## v10.14.0
@ -124,10 +219,10 @@
### Deprecated Methods ### Deprecated Methods
| Old Method | New Method | | Old Method | New Method |
|-------------:|:-----------:| | -------------------------: | :---------------------------: |
|azure.NewFuture() | azure.NewFutureFromResponse()| | azure.NewFuture() | azure.NewFutureFromResponse() |
|Future.WaitForCompletion() | Future.WaitForCompletionRef()| | Future.WaitForCompletion() | Future.WaitForCompletionRef() |
### New Features ### New Features
@ -159,7 +254,7 @@
### New Features ### New Features
- Added *WithContext() methods to ADAL token refresh operations. - Added \*WithContext() methods to ADAL token refresh operations.
## v10.6.2 ## v10.6.2
@ -192,12 +287,14 @@
## v10.4.0 ## v10.4.0
### New Features ### New Features
- Added helper for parsing Azure Resource ID's. - Added helper for parsing Azure Resource ID's.
- Added deprecation message to utils.GetEnvVarOrExit() - Added deprecation message to utils.GetEnvVarOrExit()
## v10.3.0 ## v10.3.0
### New Features ### New Features
- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints - Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints
- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint - Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint
@ -255,6 +352,7 @@
- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). - The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
## v9.10.0 ## v9.10.0
- Fix the Service Bus suffix in Azure public env - Fix the Service Bus suffix in Azure public env
- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) - Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control)
@ -308,6 +406,7 @@
## v9.5.3 ## v9.5.3
### Bug Fixes ### Bug Fixes
- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. - Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters.
- Set correct Content Type when using autorest.WithFormData. - Set correct Content Type when using autorest.WithFormData.
@ -315,7 +414,7 @@
### Bug Fixes ### Bug Fixes
- Check for nil *http.Response before dereferencing it. - Check for nil \*http.Response before dereferencing it.
## v9.5.1 ## v9.5.1
@ -399,8 +498,8 @@
### Bug Fixes ### Bug Fixes
- RetriableRequest can now tolerate a ReadSeekable body being read but not reset. - RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
- Adding missing Apache Headers - Adding missing Apache Headers
## v9.0.0 ## v9.0.0
@ -429,9 +528,11 @@ Updates to Error string formats for clarity. Also, adding a copy of the http.Res
- Make RetriableRequest work with multiple versions of Go - Make RetriableRequest work with multiple versions of Go
## v8.1.1 ## v8.1.1
Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
## v8.1.0 ## v8.1.0
Adds RetriableRequest type for more efficient handling of retrying HTTP requests. Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
## v8.0.0 ## v8.0.0
@ -440,9 +541,11 @@ ADAL refactored into its own package.
Support for UNIX time. Support for UNIX time.
## v7.3.1 ## v7.3.1
- Version Testing now removed from production bits that are shipped with the library. - Version Testing now removed from production bits that are shipped with the library.
## v7.3.0 ## v7.3.0
- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations - Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
to acknowledge that they do not need either the entire or a trailing portion to acknowledge that they do not need either the entire or a trailing portion
of accepts response body. In doing so, Go's http library can reuse HTTP of accepts response body. In doing so, Go's http library can reuse HTTP
@ -452,40 +555,49 @@ Support for UNIX time.
- Updating Glide dependencies. - Updating Glide dependencies.
## v7.2.5 ## v7.2.5
- Fixed the Active Directory endpoint for the China cloud. - Fixed the Active Directory endpoint for the China cloud.
- Removes UTF-8 BOM if present in response payload. - Removes UTF-8 BOM if present in response payload.
- Added telemetry. - Added telemetry.
## v7.2.3 ## v7.2.3
- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay - Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
duration. duration.
## v7.2.2 ## v7.2.2
- autorest/azure: added ASM and ARM VM DNS suffixes. - autorest/azure: added ASM and ARM VM DNS suffixes.
## v7.2.1 ## v7.2.1
- fixed parsing of UTC times that are not RFC3339 conformant. - fixed parsing of UTC times that are not RFC3339 conformant.
## v7.2.0 ## v7.2.0
- autorest/validation: Reformat validation error for better error message. - autorest/validation: Reformat validation error for better error message.
## v7.1.0 ## v7.1.0
- preparer: Added support for multipart formdata - WithMultiPartFormdata() - preparer: Added support for multipart formdata - WithMultiPartFormdata()
- preparer: Added support for sending file in request body - WithFile - preparer: Added support for sending file in request body - WithFile
- client: Added RetryDuration parameter. - client: Added RetryDuration parameter.
- autorest/validation: new package for validation code for Azure Go SDK. - autorest/validation: new package for validation code for Azure Go SDK.
## v7.0.7 ## v7.0.7
- Add trailing / to endpoint - Add trailing / to endpoint
- azure: add EnvironmentFromName - azure: add EnvironmentFromName
## v7.0.6 ## v7.0.6
- Add retry logic for 408, 500, 502, 503 and 504 status codes. - Add retry logic for 408, 500, 502, 503 and 504 status codes.
- Change url path and query encoding logic. - Change url path and query encoding logic.
- Fix DelayForBackoff for proper exponential delay. - Fix DelayForBackoff for proper exponential delay.
- Add CookieJar in Client. - Add CookieJar in Client.
## v7.0.5 ## v7.0.5
- Add check to start polling only when status is in [200,201,202]. - Add check to start polling only when status is in [200,201,202].
- Refactoring for unchecked errors. - Refactoring for unchecked errors.
- azure/persist changes. - azure/persist changes.
@ -494,20 +606,25 @@ Support for UNIX time.
- Add attribute details in service error. - Add attribute details in service error.
## v7.0.4 ## v7.0.4
- Better error messages for long running operation failures - Better error messages for long running operation failures
## v7.0.3 ## v7.0.3
- Corrected DoPollForAsynchronous to properly handle the initial response - Corrected DoPollForAsynchronous to properly handle the initial response
## v7.0.2 ## v7.0.2
- Corrected DoPollForAsynchronous to continue using the polling method first discovered - Corrected DoPollForAsynchronous to continue using the polling method first discovered
## v7.0.1 ## v7.0.1
- Fixed empty JSON input error in ByUnmarshallingJSON - Fixed empty JSON input error in ByUnmarshallingJSON
- Fixed polling support for GET calls - Fixed polling support for GET calls
- Changed format name from TimeRfc1123 to TimeRFC1123 - Changed format name from TimeRfc1123 to TimeRFC1123
## v7.0.0 ## v7.0.0
- Added ByCopying responder with supporting TeeReadCloser - Added ByCopying responder with supporting TeeReadCloser
- Rewrote Azure asynchronous handling - Rewrote Azure asynchronous handling
- Reverted to only unmarshalling JSON - Reverted to only unmarshalling JSON
@ -524,9 +641,11 @@ only checked for one of those (that is, the presence of the `Azure-AsyncOperatio
The new code correctly covers all cases and aligns with the other Azure SDKs. The new code correctly covers all cases and aligns with the other Azure SDKs.
## v6.1.0 ## v6.1.0
- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. - Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
## v6.0.0 ## v6.0.0
- Completely reworked the handling of polled and asynchronous requests - Completely reworked the handling of polled and asynchronous requests
- Removed unnecessary routines - Removed unnecessary routines
- Reworked `mocks.Sender` to replay a series of `http.Response` objects - Reworked `mocks.Sender` to replay a series of `http.Response` objects
@ -537,21 +656,25 @@ Handling polled and asynchronous requests is no longer part of `Client#Send`. In
and `azure.DoPollForAsynchronous` for examples. and `azure.DoPollForAsynchronous` for examples.
## v5.0.0 ## v5.0.0
- Added new RespondDecorators unmarshalling primitive types - Added new RespondDecorators unmarshalling primitive types
- Corrected application of inspection and authorization PrependDecorators - Corrected application of inspection and authorization PrependDecorators
## v4.0.0 ## v4.0.0
- Added support for Azure long-running operations. - Added support for Azure long-running operations.
- Added cancelation support to all decorators and functions that may delay. - Added cancelation support to all decorators and functions that may delay.
- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. - Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
## v3.1.0 ## v3.1.0
- Add support for OAuth Device Flow authorization. - Add support for OAuth Device Flow authorization.
- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. - Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
- Add helpers for persisting and restoring Tokens. - Add helpers for persisting and restoring Tokens.
- Increased code coverage in the github.com/Azure/autorest/azure package - Increased code coverage in the github.com/Azure/autorest/azure package
## v3.0.0 ## v3.0.0
- Breaking: `NewErrorWithError` no longer takes `statusCode int`. - Breaking: `NewErrorWithError` no longer takes `statusCode int`.
- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. - Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
- Breaking: `Client#Send()` no longer takes `codes ...int` argument. - Breaking: `Client#Send()` no longer takes `codes ...int` argument.

View file

@ -1,6 +1,26 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:e1b859e3d9e90007d5fbf25edf57733b224f1857f6592636130afab3af8cfae7"
name = "contrib.go.opencensus.io/exporter/ocagent"
packages = ["."]
pruneopts = ""
revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d"
version = "v0.2.0"
[[projects]]
digest = "1:e0a4505d5cf7ac6b5d92e3aee79d838b5f1ae8e9641ec7fa5d1e9b01d7a7ea95"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/agent/trace/v1",
"gen-go/trace/v1",
]
pruneopts = ""
revision = "24333298e36590ea0716598caacc8959fc393c48"
version = "v0.0.2"
[[projects]] [[projects]]
digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77" digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77"
name = "github.com/davecgh/go-spew" name = "github.com/davecgh/go-spew"
@ -18,20 +38,35 @@
version = "v3.2.0" version = "v3.2.0"
[[projects]] [[projects]]
branch = "master"
digest = "1:7f175a633086a933d1940a7e7dc2154a0070a7c25fb4a2f671f3eef1a34d1fd7" digest = "1:7f175a633086a933d1940a7e7dc2154a0070a7c25fb4a2f671f3eef1a34d1fd7"
name = "github.com/dimchansky/utfbom" name = "github.com/dimchansky/utfbom"
packages = ["."] packages = ["."]
pruneopts = "" pruneopts = ""
revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c" revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c"
version = "v1.0.0"
[[projects]]
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = ""
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]] [[projects]]
branch = "master"
digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf" digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf"
name = "github.com/mitchellh/go-homedir" name = "github.com/mitchellh/go-homedir"
packages = ["."] packages = ["."]
pruneopts = "" pruneopts = ""
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
version = "v1.0.0"
[[projects]] [[projects]]
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
@ -52,25 +87,160 @@
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2" version = "v1.2.2"
[[projects]]
digest = "1:ad67dfd3799a2c58f6c65871dd141d8b53f61f600aec48ce8d7fa16a4d5476f8"
name = "go.opencensus.io"
packages = [
".",
"exemplar",
"internal",
"internal/tagencoding",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"plugin/ochttp/propagation/tracecontext",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
]
pruneopts = ""
revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b"
version = "v0.18.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:793a79198b755828dec284c6f1325e24e09186f1b7ba818b65c7c35104ed86eb" digest = "1:78f41d38365ccef743e54ed854a2faf73313ba0750c621116a8eeb0395590bd0"
name = "golang.org/x/crypto" name = "golang.org/x/crypto"
packages = [ packages = [
"pkcs12", "pkcs12",
"pkcs12/internal/rc2", "pkcs12/internal/rc2",
] ]
pruneopts = "" pruneopts = ""
revision = "614d502a4dac94afa3a6ce146bd1736da82514c6" revision = "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb"
[[projects]]
branch = "master"
digest = "1:547dcb6aebfb7fb17947660ebb034470c13f4d63d893def190a2f7ba3d09bc38"
name = "golang.org/x/net"
packages = [
"context",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = ""
revision = "49bb7cea24b1df9410e1712aa6433dae904ff66a"
[[projects]]
branch = "master"
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = ""
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
branch = "master"
digest = "1:2ed0bf267e44950120acd95570227e28184573ffb099bd85b529ee148e004ddb"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = ""
revision = "fa43e7bc11baaae89f3f902b2b4d832b68234844"
[[projects]]
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = ""
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:ca2e72555afbcebdead7c961b135650b5111dbbaf37a874de63976fdda57f129"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = ""
revision = "c51f30376ab7ec4f22b65de846a41593c8b70f07"
[[projects]]
branch = "master"
digest = "1:1b3b4ec811695907c4a3cb92e4f32834a4a42459bff7e02068b6b2b5344803cd"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = ""
revision = "af9cb2a35e7f169ec875002c1829c9b315cddc04"
[[projects]]
digest = "1:15656947b87a6a240e61dcfae9e71a55a8d5677f240d12ab48f02cdbabf1e309"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
]
pruneopts = ""
revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1"
version = "v1.15.0"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
input-imports = [ input-imports = [
"contrib.go.opencensus.io/exporter/ocagent",
"github.com/dgrijalva/jwt-go", "github.com/dgrijalva/jwt-go",
"github.com/dimchansky/utfbom", "github.com/dimchansky/utfbom",
"github.com/mitchellh/go-homedir", "github.com/mitchellh/go-homedir",
"github.com/stretchr/testify/require", "github.com/stretchr/testify/require",
"go.opencensus.io/plugin/ochttp",
"go.opencensus.io/plugin/ochttp/propagation/tracecontext",
"go.opencensus.io/stats/view",
"go.opencensus.io/trace",
"golang.org/x/crypto/pkcs12", "golang.org/x/crypto/pkcs12",
] ]
solver-name = "gps-cdcl" solver-name = "gps-cdcl"

View file

@ -25,17 +25,21 @@
version = "3.1.0" version = "3.1.0"
[[constraint]] [[constraint]]
branch = "master"
name = "github.com/dimchansky/utfbom" name = "github.com/dimchansky/utfbom"
version = "1.0.0"
[[constraint]] [[constraint]]
branch = "master"
name = "github.com/mitchellh/go-homedir" name = "github.com/mitchellh/go-homedir"
version = "1.0.0"
[[constraint]] [[constraint]]
name = "github.com/stretchr/testify" name = "github.com/stretchr/testify"
version = "1.2.0" version = "1.2.0"
[[constraint]] [[constraint]]
branch = "master" name = "go.opencensus.io"
name = "golang.org/x/crypto" version = "0.18.0"
[[constraint]]
name = "contrib.go.opencensus.io/exporter/ocagent"
version = "0.2.0"

View file

@ -19,10 +19,6 @@ import (
"net/url" "net/url"
) )
const (
activeDirectoryAPIVersion = "1.0"
)
// OAuthConfig represents the endpoints needed // OAuthConfig represents the endpoints needed
// in OAuth operations // in OAuth operations
type OAuthConfig struct { type OAuthConfig struct {
@ -46,11 +42,25 @@ func validateStringParam(param, name string) error {
// NewOAuthConfig returns an OAuthConfig with tenant specific urls // NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
apiVer := "1.0"
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
}
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
return nil, err return nil, err
} }
api := ""
// it's legal for tenantID to be empty so don't validate it // it's legal for tenantID to be empty so don't validate it
const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" if apiVersion != nil {
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
return nil, err
}
api = fmt.Sprintf("?api-version=%s", *apiVersion)
}
const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
u, err := url.Parse(activeDirectoryEndpoint) u, err := url.Parse(activeDirectoryEndpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err
if err != nil { if err != nil {
return nil, err return nil, err
} }
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
if err != nil { if err != nil {
return nil, err return nil, err
} }
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
if err != nil { if err != nil {
return nil, err return nil, err
} }
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r) return sf(r)
} }
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the // SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the // http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result. // http.Response result.
type SendDecorator func(Sender) Sender type SendDecorator func(Sender) Sender

View file

@ -29,13 +29,12 @@ import (
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/Azure/go-autorest/autorest/date" "github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/version" "github.com/Azure/go-autorest/tracing"
"github.com/dgrijalva/jwt-go" "github.com/dgrijalva/jwt-go"
) )
@ -97,18 +96,27 @@ type RefresherWithContext interface {
type TokenRefreshCallback func(Token) error type TokenRefreshCallback func(Token) error
// Token encapsulates the access token used to authorize Azure requests. // Token encapsulates the access token used to authorize Azure requests.
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
type Token struct { type Token struct {
AccessToken string `json:"access_token"` AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"` RefreshToken string `json:"refresh_token"`
ExpiresIn string `json:"expires_in"` ExpiresIn json.Number `json:"expires_in"`
ExpiresOn string `json:"expires_on"` ExpiresOn json.Number `json:"expires_on"`
NotBefore string `json:"not_before"` NotBefore json.Number `json:"not_before"`
Resource string `json:"resource"` Resource string `json:"resource"`
Type string `json:"token_type"` Type string `json:"token_type"`
} }
func newToken() Token {
return Token{
ExpiresIn: "0",
ExpiresOn: "0",
NotBefore: "0",
}
}
// IsZero returns true if the token object is zero-initialized. // IsZero returns true if the token object is zero-initialized.
func (t Token) IsZero() bool { func (t Token) IsZero() bool {
return t == Token{} return t == Token{}
@ -116,12 +124,12 @@ func (t Token) IsZero() bool {
// Expires returns the time.Time when the Token expires. // Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time { func (t Token) Expires() time.Time {
s, err := strconv.Atoi(t.ExpiresOn) s, err := t.ExpiresOn.Float64()
if err != nil { if err != nil {
s = -3600 s = -3600
} }
expiration := date.NewUnixTimeFromSeconds(float64(s)) expiration := date.NewUnixTimeFromSeconds(s)
return time.Time(expiration).UTC() return time.Time(expiration).UTC()
} }
@ -218,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo
token := jwt.New(jwt.SigningMethodRS256) token := jwt.New(jwt.SigningMethodRS256)
token.Header["x5t"] = thumbprint token.Header["x5t"] = thumbprint
x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)}
token.Header["x5c"] = x5c
token.Claims = jwt.MapClaims{ token.Claims = jwt.MapClaims{
"aud": spt.inner.OauthConfig.TokenEndpoint.String(), "aud": spt.inner.OauthConfig.TokenEndpoint.String(),
"iss": spt.inner.ClientID, "iss": spt.inner.ClientID,
@ -375,8 +385,13 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
spt.refreshLock = &sync.RWMutex{} // Don't override the refreshLock or the sender if those have been already set.
spt.sender = &http.Client{} if spt.refreshLock == nil {
spt.refreshLock = &sync.RWMutex{}
}
if spt.sender == nil {
spt.sender = &http.Client{Transport: tracing.Transport}
}
return nil return nil
} }
@ -414,6 +429,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
} }
spt := &ServicePrincipalToken{ spt := &ServicePrincipalToken{
inner: servicePrincipalToken{ inner: servicePrincipalToken{
Token: newToken(),
OauthConfig: oauthConfig, OauthConfig: oauthConfig,
Secret: secret, Secret: secret,
ClientID: id, ClientID: id,
@ -422,7 +438,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
RefreshWithin: defaultRefresh, RefreshWithin: defaultRefresh,
}, },
refreshLock: &sync.RWMutex{}, refreshLock: &sync.RWMutex{},
sender: &http.Client{}, sender: &http.Client{Transport: tracing.Transport},
refreshCallbacks: callbacks, refreshCallbacks: callbacks,
} }
return spt, nil return spt, nil
@ -653,6 +669,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
spt := &ServicePrincipalToken{ spt := &ServicePrincipalToken{
inner: servicePrincipalToken{ inner: servicePrincipalToken{
Token: newToken(),
OauthConfig: OAuthConfig{ OauthConfig: OAuthConfig{
TokenEndpoint: *msiEndpointURL, TokenEndpoint: *msiEndpointURL,
}, },
@ -662,7 +679,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
RefreshWithin: defaultRefresh, RefreshWithin: defaultRefresh,
}, },
refreshLock: &sync.RWMutex{}, refreshLock: &sync.RWMutex{},
sender: &http.Client{}, sender: &http.Client{Transport: tracing.Transport},
refreshCallbacks: callbacks, refreshCallbacks: callbacks,
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
} }
@ -779,7 +796,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
if err != nil { if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
} }
req.Header.Add("User-Agent", version.UserAgent()) req.Header.Add("User-Agent", userAgent())
req = req.WithContext(ctx) req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
v := url.Values{} v := url.Values{}

View file

@ -1,4 +1,9 @@
package version package adal
import (
"fmt"
"runtime"
)
// Copyright 2017 Microsoft Corporation // Copyright 2017 Microsoft Corporation
// //
@ -14,24 +19,17 @@ package version
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
import ( const number = "v1.0.0"
"fmt"
"runtime"
)
// Number contains the semantic version of this SDK.
const Number = "v10.15.5"
var ( var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
runtime.Version(), runtime.Version(),
runtime.GOARCH, runtime.GOARCH,
runtime.GOOS, runtime.GOOS,
Number, number,
) )
) )
// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version. func userAgent() string {
func UserAgent() string { return ua
return userAgent
} }

View file

@ -21,6 +21,7 @@ import (
"strings" "strings"
"github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/tracing"
) )
const ( const (
@ -68,7 +69,7 @@ func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[str
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
} }
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters // WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters.
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer { return func(p Preparer) Preparer {
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
@ -147,7 +148,7 @@ type BearerAuthorizerCallback struct {
// is invoked when the HTTP request is submitted. // is invoked when the HTTP request is submitted.
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
if sender == nil { if sender == nil {
sender = &http.Client{} sender = &http.Client{Transport: tracing.Transport}
} }
return &BearerAuthorizerCallback{sender: sender, callback: callback} return &BearerAuthorizerCallback{sender: sender, callback: callback}
} }

View file

@ -26,6 +26,7 @@ import (
"time" "time"
"github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/tracing"
) )
const ( const (
@ -58,10 +59,7 @@ func NewFuture(req *http.Request) Future {
// with the initial response from an asynchronous operation. // with the initial response from an asynchronous operation.
func NewFutureFromResponse(resp *http.Response) (Future, error) { func NewFutureFromResponse(resp *http.Response) (Future, error) {
pt, err := createPollingTracker(resp) pt, err := createPollingTracker(resp)
if err != nil { return Future{pt: pt}, err
return Future{}, err
}
return Future{pt: pt}, nil
} }
// Response returns the last HTTP response. // Response returns the last HTTP response.
@ -89,7 +87,23 @@ func (f Future) PollingMethod() PollingMethodType {
} }
// Done queries the service to see if the operation has completed. // Done queries the service to see if the operation has completed.
// Deprecated: Use DoneWithContext()
func (f *Future) Done(sender autorest.Sender) (bool, error) { func (f *Future) Done(sender autorest.Sender) (bool, error) {
return f.DoneWithContext(context.Background(), sender)
}
// DoneWithContext queries the service to see if the operation has completed.
func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
defer func() {
sc := -1
resp := f.Response()
if resp != nil {
sc = resp.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
// support for legacy Future implementation // support for legacy Future implementation
if f.req != nil { if f.req != nil {
resp, err := sender.Do(f.req) resp, err := sender.Do(f.req)
@ -110,7 +124,7 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) {
if f.pt.hasTerminated() { if f.pt.hasTerminated() {
return true, f.pt.pollingError() return true, f.pt.pollingError()
} }
if err := f.pt.pollForStatus(sender); err != nil { if err := f.pt.pollForStatus(ctx, sender); err != nil {
return false, err return false, err
} }
if err := f.pt.checkForErrors(); err != nil { if err := f.pt.checkForErrors(); err != nil {
@ -167,11 +181,25 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e
// running operation has completed, the provided context is cancelled, or the client's // running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on // polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts. // the retry value defined in the client up to the maximum retry attempts.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error { func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
defer cancel() defer func() {
done, err := f.Done(client) sc := -1
for attempts := 0; !done; done, err = f.Done(client) { resp := f.Response()
if resp != nil {
sc = resp.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
cancelCtx := ctx
if d := client.PollingDuration; d != 0 {
var cancel context.CancelFunc
cancelCtx, cancel = context.WithTimeout(ctx, d)
defer cancel()
}
done, err := f.DoneWithContext(ctx, client)
for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
if attempts >= client.RetryAttempts { if attempts >= client.RetryAttempts {
return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
} }
@ -195,12 +223,12 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
attempts++ attempts++
} }
// wait until the delay elapses or the context is cancelled // wait until the delay elapses or the context is cancelled
delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
if !delayElapsed { if !delayElapsed {
return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
} }
} }
return err return
} }
// MarshalJSON implements the json.Marshaler interface. // MarshalJSON implements the json.Marshaler interface.
@ -285,7 +313,7 @@ type pollingTracker interface {
initializeState() error initializeState() error
// makes an HTTP request to check the status of the LRO // makes an HTTP request to check the status of the LRO
pollForStatus(sender autorest.Sender) error pollForStatus(ctx context.Context, sender autorest.Sender) error
// updates internal tracker state, call this after each call to pollForStatus // updates internal tracker state, call this after each call to pollForStatus
updatePollingState(provStateApl bool) error updatePollingState(provStateApl bool) error
@ -399,6 +427,10 @@ func (pt *pollingTrackerBase) updateRawBody() error {
if err != nil { if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
} }
// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
if len(b) == 0 {
return nil
}
// put the body back so it's available to other callers // put the body back so it's available to other callers
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
if err = json.Unmarshal(b, &pt.rawBody); err != nil { if err = json.Unmarshal(b, &pt.rawBody); err != nil {
@ -408,15 +440,13 @@ func (pt *pollingTrackerBase) updateRawBody() error {
return nil return nil
} }
func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error { func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
req, err := http.NewRequest(http.MethodGet, pt.URI, nil) req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
if err != nil { if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
} }
// attach the context from the original request if available (it will be absent for deserialized futures)
if pt.resp != nil { req = req.WithContext(ctx)
req = req.WithContext(pt.resp.Request.Context())
}
pt.resp, err = sender.Do(req) pt.resp, err = sender.Do(req)
if err != nil { if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
@ -445,7 +475,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
re := respErr{} re := respErr{}
defer pt.resp.Body.Close() defer pt.resp.Body.Close()
var b []byte var b []byte
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
goto Default goto Default
} }
if err = json.Unmarshal(b, &re); err != nil { if err = json.Unmarshal(b, &re); err != nil {
@ -663,7 +693,7 @@ func (pt *pollingTrackerPatch) updatePollingMethod() error {
} }
} }
// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
// note the absense of the "final GET" mechanism for PATCH // note the absence of the "final GET" mechanism for PATCH
if pt.resp.StatusCode == http.StatusAccepted { if pt.resp.StatusCode == http.StatusAccepted {
ao, err := getURLFromAsyncOpHeader(pt.resp) ao, err := getURLFromAsyncOpHeader(pt.resp)
if err != nil { if err != nil {

View file

@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
} }
// poll for registered provisioning state // poll for registered provisioning state
now := time.Now() registrationStartTime := time.Now()
for err == nil && time.Since(now) < client.PollingDuration { for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
// taken from the resources SDK // taken from the resources SDK
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
preparer := autorest.CreatePreparer( preparer := autorest.CreatePreparer(
@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
return originalReq.Context().Err() return originalReq.Context().Err()
} }
} }
if !(time.Since(now) < client.PollingDuration) { if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
return errors.New("polling for resource provider registration has exceeded the polling duration") return errors.New("polling for resource provider registration has exceeded the polling duration")
} }
return err return err

View file

@ -26,7 +26,7 @@ import (
"time" "time"
"github.com/Azure/go-autorest/logger" "github.com/Azure/go-autorest/logger"
"github.com/Azure/go-autorest/version" "github.com/Azure/go-autorest/tracing"
) )
const ( const (
@ -147,6 +147,7 @@ type Client struct {
PollingDelay time.Duration PollingDelay time.Duration
// PollingDuration sets the maximum polling time after which an error is returned. // PollingDuration sets the maximum polling time after which an error is returned.
// Setting this to zero will use the provided context to control the duration.
PollingDuration time.Duration PollingDuration time.Duration
// RetryAttempts sets the default number of retry attempts for client. // RetryAttempts sets the default number of retry attempts for client.
@ -173,7 +174,7 @@ func NewClientWithUserAgent(ua string) Client {
PollingDuration: DefaultPollingDuration, PollingDuration: DefaultPollingDuration,
RetryAttempts: DefaultRetryAttempts, RetryAttempts: DefaultRetryAttempts,
RetryDuration: DefaultRetryDuration, RetryDuration: DefaultRetryDuration,
UserAgent: version.UserAgent(), UserAgent: UserAgent(),
} }
c.Sender = c.sender() c.Sender = c.sender()
c.AddToUserAgent(ua) c.AddToUserAgent(ua)
@ -229,8 +230,10 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
func (c Client) sender() Sender { func (c Client) sender() Sender {
if c.Sender == nil { if c.Sender == nil {
j, _ := cookiejar.New(nil) j, _ := cookiejar.New(nil)
return &http.Client{Jar: j} client := &http.Client{Jar: j, Transport: tracing.Transport}
return client
} }
return c.Sender return c.Sender
} }

View file

@ -21,6 +21,8 @@ import (
"net/http" "net/http"
"strconv" "strconv"
"time" "time"
"github.com/Azure/go-autorest/tracing"
) )
// Sender is the interface that wraps the Do method to send HTTP requests. // Sender is the interface that wraps the Do method to send HTTP requests.
@ -38,7 +40,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r) return sf(r)
} }
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the // SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the // http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result. // http.Response result.
type SendDecorator func(Sender) Sender type SendDecorator func(Sender) Sender
@ -68,7 +70,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
// //
// Send will not poll or retry requests. // Send will not poll or retry requests.
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
return SendWithSender(&http.Client{}, r, decorators...) return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
} }
// SendWithSender sends the passed http.Request, through the provided Sender, returning the // SendWithSender sends the passed http.Request, through the provided Sender, returning the
@ -216,8 +218,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
rr := NewRetriableRequest(r) rr := NewRetriableRequest(r)
// Increment to add the first call (attempts denotes number of retries) // Increment to add the first call (attempts denotes number of retries)
attempts++ for attempt := 0; attempt < attempts+1; {
for attempt := 0; attempt < attempts; {
err = rr.Prepare() err = rr.Prepare()
if err != nil { if err != nil {
return resp, err return resp, err

View file

@ -157,7 +157,7 @@ func AsStringSlice(s interface{}) ([]string, error) {
} }
// String method converts interface v to string. If interface is a list, it // String method converts interface v to string. If interface is a list, it
// joins list elements using the seperator. Note that only sep[0] will be used for // joins list elements using the separator. Note that only sep[0] will be used for
// joining if any separator is specified. // joining if any separator is specified.
func String(v interface{}, sep ...string) string { func String(v interface{}, sep ...string) string {
if len(sep) == 0 { if len(sep) == 0 {

View file

@ -1,7 +1,5 @@
package autorest package autorest
import "github.com/Azure/go-autorest/version"
// Copyright 2017 Microsoft Corporation // Copyright 2017 Microsoft Corporation
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
@ -16,7 +14,28 @@ import "github.com/Azure/go-autorest/version"
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
import (
"fmt"
"runtime"
)
const number = "v11.2.8"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
number,
)
)
// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
func UserAgent() string {
return userAgent
}
// Version returns the semantic version (see http://semver.org). // Version returns the semantic version (see http://semver.org).
func Version() string { func Version() string {
return version.Number return number
} }

View file

@ -162,7 +162,7 @@ type Writer interface {
// WriteResponse writes the specified HTTP response to the logger if the log level is greater than // WriteResponse writes the specified HTTP response to the logger if the log level is greater than
// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
// Custom filters can be specified to exclude URL, header, and/or body content from the log. // Custom filters can be specified to exclude URL, header, and/or body content from the log.
// By default no respone content is excluded. // By default no response content is excluded.
WriteResponse(resp *http.Response, filter Filter) WriteResponse(resp *http.Response, filter Filter)
} }
@ -318,7 +318,7 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
// returns true if the provided body should be included in the log // returns true if the provided body should be included in the log
func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
ct := header.Get("Content-Type") ct := header.Get("Content-Type")
return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1 return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream")
} }
// creates standard header for log entries, it contains a timestamp and the log level // creates standard header for log entries, it contains a timestamp and the log level

190
vendor/github.com/Azure/go-autorest/tracing/tracing.go generated vendored Normal file
View file

@ -0,0 +1,190 @@
package tracing
// Copyright 2018 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"context"
"fmt"
"net/http"
"os"
"contrib.go.opencensus.io/exporter/ocagent"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
)
var (
// Transport is the default tracing RoundTripper. The custom options setter will control
// if traces are being emitted or not.
Transport = &ochttp.Transport{
Propagation: &tracecontext.HTTPFormat{},
GetStartOptions: getStartOptions,
}
// enabled is the flag for marking if tracing is enabled.
enabled = false
// Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise
// it will be using the parent sampler or the default.
sampler = trace.NeverSample()
// Views for metric instrumentation.
views = map[string]*view.View{}
// the trace exporter
traceExporter trace.Exporter
)
func init() {
enableFromEnv()
}
func enableFromEnv() {
_, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
_, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
if ok || legacyOk {
agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")
if ok {
EnableWithAIForwarding(agentEndpoint)
} else {
Enable()
}
}
}
// IsEnabled returns true if monitoring is enabled for the sdk.
func IsEnabled() bool {
return enabled
}
// Enable will start instrumentation for metrics and traces.
func Enable() error {
enabled = true
sampler = nil
err := initStats()
return err
}
// Disable will disable instrumentation for metrics and traces.
func Disable() {
disableStats()
sampler = trace.NeverSample()
if traceExporter != nil {
trace.UnregisterExporter(traceExporter)
}
enabled = false
}
// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
// exporter making the metrics and traces available in app insights.
func EnableWithAIForwarding(agentEndpoint string) (err error) {
err = Enable()
if err != nil {
return err
}
traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
if err != nil {
return err
}
trace.RegisterExporter(traceExporter)
return
}
// getStartOptions is the custom options setter for the ochttp package.
func getStartOptions(*http.Request) trace.StartOptions {
return trace.StartOptions{
Sampler: sampler,
}
}
// initStats registers the views for the http metrics
func initStats() (err error) {
clientViews := []*view.View{
ochttp.ClientCompletedCount,
ochttp.ClientRoundtripLatencyDistribution,
ochttp.ClientReceivedBytesDistribution,
ochttp.ClientSentBytesDistribution,
}
for _, cv := range clientViews {
vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
views[vn] = cv.WithName(vn)
err = view.Register(views[vn])
if err != nil {
return err
}
}
return
}
// disableStats will unregister the previously registered metrics
func disableStats() {
for _, v := range views {
view.Unregister(v)
}
}
// StartSpan starts a trace span
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
return ctx
}
// EndSpan ends a previously started span stored in the context
func EndSpan(ctx context.Context, httpStatusCode int, err error) {
span := trace.FromContext(ctx)
if span == nil {
return
}
if err != nil {
span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
}
span.End()
}
// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
func toTraceStatusCode(httpStatusCode int) int32 {
switch {
case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
return trace.StatusCodeOK
case httpStatusCode == http.StatusBadRequest:
return trace.StatusCodeInvalidArgument
case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
return trace.StatusCodeUnauthenticated
case httpStatusCode == http.StatusForbidden:
return trace.StatusCodePermissionDenied
case httpStatusCode == http.StatusNotFound:
return trace.StatusCodeNotFound
case httpStatusCode == http.StatusTooManyRequests:
return trace.StatusCodeResourceExhausted
case httpStatusCode == 499:
return trace.StatusCodeCancelled
case httpStatusCode == http.StatusNotImplemented:
return trace.StatusCodeUnimplemented
case httpStatusCode == http.StatusServiceUnavailable:
return trace.StatusCodeUnavailable
case httpStatusCode == http.StatusGatewayTimeout:
return trace.StatusCodeDeadlineExceeded
default:
return trace.StatusCodeUnknown
}
}

View file

@ -1 +1,2 @@
logrus logrus
vendor

View file

@ -1,3 +1,15 @@
# 1.2.0
This new release introduces:
* A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
* A new trace level named `Trace` whose level is below `Debug`
* A configurable exit function to be called upon a Fatal trace
* The `Level` object now implements `encoding.TextUnmarshaler` interface
# 1.1.1
This is a bug fix release.
* fix the build break on Solaris
* don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
# 1.1.0 # 1.1.0
This new release introduces: This new release introduces:
* several fixes: * several fixes:

View file

@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
``` ```
To ensure this behaviour even if a TTY is attached, set your formatter as follows:
```go
log.SetFormatter(&log.TextFormatter{
DisableColors: true,
FullTimestamp: true,
})
```
#### Logging Method Name
If you wish to add the calling method as a field, instruct the logger via:
```go
log.SetReportCaller(true)
```
This adds the caller as 'method' like so:
```json
{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
```
```text
time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
```
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
environment via benchmarks:
```
go test -bench=.*CallerTracing
```
#### Case-sensitivity #### Case-sensitivity
@ -246,9 +277,10 @@ A list of currently known of service hook can be found in this wiki [page](https
#### Level logging #### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
```go ```go
log.Trace("Something very low level.")
log.Debug("Useful debugging information.") log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!") log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.") log.Warn("You should probably take a look at this.")

View file

@ -4,11 +4,30 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"os" "os"
"reflect"
"runtime"
"strings"
"sync" "sync"
"time" "time"
) )
var bufferPool *sync.Pool var (
bufferPool *sync.Pool
// qualified package name, cached at first use
logrusPackage string
// Positions in the call stack when tracing to report the calling method
minimumCallerDepth int
// Used for caller information initialisation
callerInitOnce sync.Once
)
const (
maximumCallerDepth int = 25
knownLogrusFrames int = 4
)
func init() { func init() {
bufferPool = &sync.Pool{ bufferPool = &sync.Pool{
@ -16,15 +35,18 @@ func init() {
return new(bytes.Buffer) return new(bytes.Buffer)
}, },
} }
// start at the bottom of the stack before the package-name cache is primed
minimumCallerDepth = 1
} }
// Defines the key when adding errors using WithError. // Defines the key when adding errors using WithError.
var ErrorKey = "error" var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all // An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info, // the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and // Info, Warn, Error, Fatal or Panic is called on it. These objects can be
// passed around as much as you wish to avoid field duplication. // reused and passed around as much as you wish to avoid field duplication.
type Entry struct { type Entry struct {
Logger *Logger Logger *Logger
@ -34,22 +56,28 @@ type Entry struct {
// Time at which the log entry was created // Time at which the log entry was created
Time time.Time Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
// This field will be set on entry firing and the value will be equal to the one in Logger struct field. // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic // Calling method, with package name
Caller *runtime.Frame
// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
Message string Message string
// When formatter is called in entry.log(), a Buffer may be set to entry // When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer Buffer *bytes.Buffer
// err may contain a field formatting error
err string
} }
func NewEntry(logger *Logger) *Entry { func NewEntry(logger *Logger) *Entry {
return &Entry{ return &Entry{
Logger: logger, Logger: logger,
// Default is five fields, give a little extra room // Default is three fields, plus one optional. Give a little extra room.
Data: make(Fields, 5), Data: make(Fields, 6),
} }
} }
@ -80,10 +108,18 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range entry.Data { for k, v := range entry.Data {
data[k] = v data[k] = v
} }
var field_err string
for k, v := range fields { for k, v := range fields {
data[k] = v if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
field_err = fmt.Sprintf("can not add field %q", k)
if entry.err != "" {
field_err = entry.err + ", " + field_err
}
} else {
data[k] = v
}
} }
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time} return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
} }
// Overrides the time of the Entry. // Overrides the time of the Entry.
@ -91,6 +127,57 @@ func (entry *Entry) WithTime(t time.Time) *Entry {
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
} }
// getPackageName reduces a fully qualified function name to the package name
// There really ought to be to be a better way...
func getPackageName(f string) string {
for {
lastPeriod := strings.LastIndex(f, ".")
lastSlash := strings.LastIndex(f, "/")
if lastPeriod > lastSlash {
f = f[:lastPeriod]
} else {
break
}
}
return f
}
// getCaller retrieves the name of the first non-logrus calling function
func getCaller() *runtime.Frame {
// Restrict the lookback frames to avoid runaway lookups
pcs := make([]uintptr, maximumCallerDepth)
depth := runtime.Callers(minimumCallerDepth, pcs)
frames := runtime.CallersFrames(pcs[:depth])
// cache this package's fully-qualified name
callerInitOnce.Do(func() {
logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
// now that we have the cache, we can skip a minimum count of known-logrus functions
// XXX this is dubious, the number of frames may vary store an entry in a logger interface
minimumCallerDepth = knownLogrusFrames
})
for f, again := frames.Next(); again; f, again = frames.Next() {
pkg := getPackageName(f.Function)
// If the caller isn't part of this package, we're done
if pkg != logrusPackage {
return &f
}
}
// if we got here, we failed to find the caller's context
return nil
}
func (entry Entry) HasCaller() (has bool) {
return entry.Logger != nil &&
entry.Logger.ReportCaller &&
entry.Caller != nil
}
// This function is not declared with a pointer value because otherwise // This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines // race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) { func (entry Entry) log(level Level, msg string) {
@ -107,6 +194,9 @@ func (entry Entry) log(level Level, msg string) {
entry.Level = level entry.Level = level
entry.Message = msg entry.Message = msg
if entry.Logger.ReportCaller {
entry.Caller = getCaller()
}
entry.fireHooks() entry.fireHooks()
@ -150,6 +240,12 @@ func (entry *Entry) write() {
} }
} }
func (entry *Entry) Trace(args ...interface{}) {
if entry.Logger.IsLevelEnabled(TraceLevel) {
entry.log(TraceLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Debug(args ...interface{}) { func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.IsLevelEnabled(DebugLevel) { if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.log(DebugLevel, fmt.Sprint(args...)) entry.log(DebugLevel, fmt.Sprint(args...))
@ -186,7 +282,7 @@ func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.IsLevelEnabled(FatalLevel) { if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.log(FatalLevel, fmt.Sprint(args...)) entry.log(FatalLevel, fmt.Sprint(args...))
} }
Exit(1) entry.Logger.Exit(1)
} }
func (entry *Entry) Panic(args ...interface{}) { func (entry *Entry) Panic(args ...interface{}) {
@ -198,6 +294,12 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions // Entry Printf family functions
func (entry *Entry) Tracef(format string, args ...interface{}) {
if entry.Logger.IsLevelEnabled(TraceLevel) {
entry.Trace(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Debugf(format string, args ...interface{}) { func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.IsLevelEnabled(DebugLevel) { if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(fmt.Sprintf(format, args...)) entry.Debug(fmt.Sprintf(format, args...))
@ -234,7 +336,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.IsLevelEnabled(FatalLevel) { if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(fmt.Sprintf(format, args...)) entry.Fatal(fmt.Sprintf(format, args...))
} }
Exit(1) entry.Logger.Exit(1)
} }
func (entry *Entry) Panicf(format string, args ...interface{}) { func (entry *Entry) Panicf(format string, args ...interface{}) {
@ -245,6 +347,12 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions // Entry Println family functions
func (entry *Entry) Traceln(args ...interface{}) {
if entry.Logger.IsLevelEnabled(TraceLevel) {
entry.Trace(entry.sprintlnn(args...))
}
}
func (entry *Entry) Debugln(args ...interface{}) { func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.IsLevelEnabled(DebugLevel) { if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(entry.sprintlnn(args...)) entry.Debug(entry.sprintlnn(args...))
@ -281,7 +389,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.IsLevelEnabled(FatalLevel) { if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(entry.sprintlnn(args...)) entry.Fatal(entry.sprintlnn(args...))
} }
Exit(1) entry.Logger.Exit(1)
} }
func (entry *Entry) Panicln(args ...interface{}) { func (entry *Entry) Panicln(args ...interface{}) {

View file

@ -24,6 +24,12 @@ func SetFormatter(formatter Formatter) {
std.SetFormatter(formatter) std.SetFormatter(formatter)
} }
// SetReportCaller sets whether the standard logger will include the calling
// method as a field.
func SetReportCaller(include bool) {
std.SetReportCaller(include)
}
// SetLevel sets the standard logger level. // SetLevel sets the standard logger level.
func SetLevel(level Level) { func SetLevel(level Level) {
std.SetLevel(level) std.SetLevel(level)
@ -77,6 +83,11 @@ func WithTime(t time.Time) *Entry {
return std.WithTime(t) return std.WithTime(t)
} }
// Trace logs a message at level Trace on the standard logger.
func Trace(args ...interface{}) {
std.Trace(args...)
}
// Debug logs a message at level Debug on the standard logger. // Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) { func Debug(args ...interface{}) {
std.Debug(args...) std.Debug(args...)
@ -117,6 +128,11 @@ func Fatal(args ...interface{}) {
std.Fatal(args...) std.Fatal(args...)
} }
// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
std.Tracef(format, args...)
}
// Debugf logs a message at level Debug on the standard logger. // Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) { func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...) std.Debugf(format, args...)
@ -157,6 +173,11 @@ func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...) std.Fatalf(format, args...)
} }
// Traceln logs a message at level Trace on the standard logger.
func Traceln(args ...interface{}) {
std.Traceln(args...)
}
// Debugln logs a message at level Debug on the standard logger. // Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) { func Debugln(args ...interface{}) {
std.Debugln(args...) std.Debugln(args...)

View file

@ -2,7 +2,16 @@ package logrus
import "time" import "time"
const defaultTimestampFormat = time.RFC3339 // Default key names for the default fields
const (
defaultTimestampFormat = time.RFC3339
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
FieldKeyLogrusError = "logrus_error"
FieldKeyFunc = "func"
FieldKeyFile = "file"
)
// The Formatter interface is used to implement a custom Formatter. It takes an // The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones: // `Entry`. It exposes all the fields, including the default ones:
@ -18,7 +27,7 @@ type Formatter interface {
Format(*Entry) ([]byte, error) Format(*Entry) ([]byte, error)
} }
// This is to not silently overwrite `time`, `msg` and `level` fields when // This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
// dumping it. If this code wasn't there doing: // dumping it. If this code wasn't there doing:
// //
// logrus.WithField("level", 1).Info("hello") // logrus.WithField("level", 1).Info("hello")
@ -30,7 +39,7 @@ type Formatter interface {
// //
// It's not exported because it's still using Data in an opinionated way. It's to // It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters. // avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields, fieldMap FieldMap) { func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
timeKey := fieldMap.resolve(FieldKeyTime) timeKey := fieldMap.resolve(FieldKeyTime)
if t, ok := data[timeKey]; ok { if t, ok := data[timeKey]; ok {
data["fields."+timeKey] = t data["fields."+timeKey] = t
@ -48,4 +57,22 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) {
data["fields."+levelKey] = l data["fields."+levelKey] = l
delete(data, levelKey) delete(data, levelKey)
} }
logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
if l, ok := data[logrusErrKey]; ok {
data["fields."+logrusErrKey] = l
delete(data, logrusErrKey)
}
// If reportCaller is not set, 'func' will not conflict.
if reportCaller {
funcKey := fieldMap.resolve(FieldKeyFunc)
if l, ok := data[funcKey]; ok {
data["fields."+funcKey] = l
}
fileKey := fieldMap.resolve(FieldKeyFile)
if l, ok := data[fileKey]; ok {
data["fields."+fileKey] = l
}
}
} }

View file

@ -2,8 +2,9 @@ module github.com/sirupsen/logrus
require ( require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe github.com/konsorten/go-windows-terminal-sequences v1.0.1
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.2.2 github.com/stretchr/testify v1.2.2
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33

View file

@ -2,8 +2,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=

View file

@ -11,13 +11,6 @@ type fieldKey string
// FieldMap allows customization of the key names for default fields. // FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string type FieldMap map[fieldKey]string
// Default key names for the default fields
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string { func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok { if k, ok := f[key]; ok {
return k return k
@ -41,9 +34,10 @@ type JSONFormatter struct {
// As an example: // As an example:
// formatter := &JSONFormatter{ // formatter := &JSONFormatter{
// FieldMap: FieldMap{ // FieldMap: FieldMap{
// FieldKeyTime: "@timestamp", // FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level", // FieldKeyLevel: "@level",
// FieldKeyMsg: "@message", // FieldKeyMsg: "@message",
// FieldKeyFunc: "@caller",
// }, // },
// } // }
FieldMap FieldMap FieldMap FieldMap
@ -54,7 +48,7 @@ type JSONFormatter struct {
// Format renders a single log entry // Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3) data := make(Fields, len(entry.Data)+4)
for k, v := range entry.Data { for k, v := range entry.Data {
switch v := v.(type) { switch v := v.(type) {
case error: case error:
@ -72,18 +66,25 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data = newData data = newData
} }
prefixFieldClashes(data, f.FieldMap) prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
timestampFormat := f.TimestampFormat timestampFormat := f.TimestampFormat
if timestampFormat == "" { if timestampFormat == "" {
timestampFormat = defaultTimestampFormat timestampFormat = defaultTimestampFormat
} }
if entry.err != "" {
data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
}
if !f.DisableTimestamp { if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
} }
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
if entry.HasCaller() {
data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
}
var b *bytes.Buffer var b *bytes.Buffer
if entry.Buffer != nil { if entry.Buffer != nil {

View file

@ -24,6 +24,10 @@ type Logger struct {
// own that implements the `Formatter` interface, see the `README` or included // own that implements the `Formatter` interface, see the `README` or included
// formatters for examples. // formatters for examples.
Formatter Formatter Formatter Formatter
// Flag for whether to log caller info (off by default)
ReportCaller bool
// The logging level the logger should log at. This is typically (and defaults // The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. // logged.
@ -32,8 +36,12 @@ type Logger struct {
mu MutexWrap mu MutexWrap
// Reusable empty entry // Reusable empty entry
entryPool sync.Pool entryPool sync.Pool
// Function to exit the application, defaults to `os.Exit()`
ExitFunc exitFunc
} }
type exitFunc func(int)
type MutexWrap struct { type MutexWrap struct {
lock sync.Mutex lock sync.Mutex
disabled bool disabled bool
@ -69,10 +77,12 @@ func (mw *MutexWrap) Disable() {
// It's recommended to make this a global instance called `log`. // It's recommended to make this a global instance called `log`.
func New() *Logger { func New() *Logger {
return &Logger{ return &Logger{
Out: os.Stderr, Out: os.Stderr,
Formatter: new(TextFormatter), Formatter: new(TextFormatter),
Hooks: make(LevelHooks), Hooks: make(LevelHooks),
Level: InfoLevel, Level: InfoLevel,
ExitFunc: os.Exit,
ReportCaller: false,
} }
} }
@ -121,6 +131,14 @@ func (logger *Logger) WithTime(t time.Time) *Entry {
return entry.WithTime(t) return entry.WithTime(t)
} }
func (logger *Logger) Tracef(format string, args ...interface{}) {
if logger.IsLevelEnabled(TraceLevel) {
entry := logger.newEntry()
entry.Tracef(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugf(format string, args ...interface{}) { func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.IsLevelEnabled(DebugLevel) { if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry() entry := logger.newEntry()
@ -173,7 +191,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
entry.Fatalf(format, args...) entry.Fatalf(format, args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
} }
Exit(1) logger.Exit(1)
} }
func (logger *Logger) Panicf(format string, args ...interface{}) { func (logger *Logger) Panicf(format string, args ...interface{}) {
@ -184,6 +202,14 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
} }
} }
func (logger *Logger) Trace(args ...interface{}) {
if logger.IsLevelEnabled(TraceLevel) {
entry := logger.newEntry()
entry.Trace(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) { func (logger *Logger) Debug(args ...interface{}) {
if logger.IsLevelEnabled(DebugLevel) { if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry() entry := logger.newEntry()
@ -236,7 +262,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
entry.Fatal(args...) entry.Fatal(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
} }
Exit(1) logger.Exit(1)
} }
func (logger *Logger) Panic(args ...interface{}) { func (logger *Logger) Panic(args ...interface{}) {
@ -247,6 +273,14 @@ func (logger *Logger) Panic(args ...interface{}) {
} }
} }
func (logger *Logger) Traceln(args ...interface{}) {
if logger.IsLevelEnabled(TraceLevel) {
entry := logger.newEntry()
entry.Traceln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) { func (logger *Logger) Debugln(args ...interface{}) {
if logger.IsLevelEnabled(DebugLevel) { if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry() entry := logger.newEntry()
@ -299,7 +333,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
entry.Fatalln(args...) entry.Fatalln(args...)
logger.releaseEntry(entry) logger.releaseEntry(entry)
} }
Exit(1) logger.Exit(1)
} }
func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) Panicln(args ...interface{}) {
@ -310,6 +344,14 @@ func (logger *Logger) Panicln(args ...interface{}) {
} }
} }
func (logger *Logger) Exit(code int) {
runHandlers()
if logger.ExitFunc == nil {
logger.ExitFunc = os.Exit
}
logger.ExitFunc(code)
}
//When file is opened with appending mode, it's safe to //When file is opened with appending mode, it's safe to
//write concurrently to a file (within 4k message on Linux). //write concurrently to a file (within 4k message on Linux).
//In these cases user can choose to disable the lock. //In these cases user can choose to disable the lock.
@ -357,6 +399,12 @@ func (logger *Logger) SetOutput(output io.Writer) {
logger.Out = output logger.Out = output
} }
func (logger *Logger) SetReportCaller(reportCaller bool) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.ReportCaller = reportCaller
}
// ReplaceHooks replaces the logger hooks and returns the old ones // ReplaceHooks replaces the logger hooks and returns the old ones
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
logger.mu.Lock() logger.mu.Lock()

View file

@ -15,6 +15,8 @@ type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic". // Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string { func (level Level) String() string {
switch level { switch level {
case TraceLevel:
return "trace"
case DebugLevel: case DebugLevel:
return "debug" return "debug"
case InfoLevel: case InfoLevel:
@ -47,12 +49,26 @@ func ParseLevel(lvl string) (Level, error) {
return InfoLevel, nil return InfoLevel, nil
case "debug": case "debug":
return DebugLevel, nil return DebugLevel, nil
case "trace":
return TraceLevel, nil
} }
var l Level var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl) return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
} }
// UnmarshalText implements encoding.TextUnmarshaler.
func (level *Level) UnmarshalText(text []byte) error {
l, err := ParseLevel(string(text))
if err != nil {
return err
}
*level = Level(l)
return nil
}
// A constant exposing all logging levels // A constant exposing all logging levels
var AllLevels = []Level{ var AllLevels = []Level{
PanicLevel, PanicLevel,
@ -61,6 +77,7 @@ var AllLevels = []Level{
WarnLevel, WarnLevel,
InfoLevel, InfoLevel,
DebugLevel, DebugLevel,
TraceLevel,
} }
// These are the different logging levels. You can set the logging level to log // These are the different logging levels. You can set the logging level to log
@ -69,7 +86,7 @@ const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the // PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ... // message passed to Debug, Info, ...
PanicLevel Level = iota PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
// logging level is set to Panic. // logging level is set to Panic.
FatalLevel FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted. // ErrorLevel level. Logs. Used for errors that should definitely be noted.
@ -82,6 +99,8 @@ const (
InfoLevel InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging. // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel DebugLevel
// TraceLevel level. Designates finer-grained informational events than the Debug.
TraceLevel
) )
// Won't compile if StdLogger can't be realized by a log.Logger // Won't compile if StdLogger can't be realized by a log.Logger
@ -148,3 +167,12 @@ type FieldLogger interface {
// IsFatalEnabled() bool // IsFatalEnabled() bool
// IsPanicEnabled() bool // IsPanicEnabled() bool
} }
// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
// here for consistancy. Do not use. Use Logger or Entry instead.
type Ext1FieldLogger interface {
FieldLogger
Tracef(format string, args ...interface{})
Trace(args ...interface{})
Traceln(args ...interface{})
}

View file

@ -1,17 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine,!js
package logrus
import (
"io"
"golang.org/x/sys/unix"
)
const ioctlReadTermios = unix.TIOCGETA
type Termios unix.Termios
func initTerminal(w io.Writer) {
}

View file

@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine,!js
package logrus
import (
"io"
"golang.org/x/sys/unix"
)
const ioctlReadTermios = unix.TCGETS
type Termios unix.Termios
func initTerminal(w io.Writer) {
}

View file

@ -0,0 +1,8 @@
// +build !windows
package logrus
import "io"
func initTerminal(w io.Writer) {
}

View file

@ -107,14 +107,14 @@ func (f *TextFormatter) isColored() bool {
// Format renders a single log entry // Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
prefixFieldClashes(entry.Data, f.FieldMap) prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller())
keys := make([]string, 0, len(entry.Data)) keys := make([]string, 0, len(entry.Data))
for k := range entry.Data { for k := range entry.Data {
keys = append(keys, k) keys = append(keys, k)
} }
fixedKeys := make([]string, 0, 3+len(entry.Data)) fixedKeys := make([]string, 0, 4+len(entry.Data))
if !f.DisableTimestamp { if !f.DisableTimestamp {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
} }
@ -122,6 +122,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
if entry.Message != "" { if entry.Message != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
} }
if entry.err != "" {
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
}
if entry.HasCaller() {
fixedKeys = append(fixedKeys,
f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
}
if !f.DisableSorting { if !f.DisableSorting {
if f.SortingFunc == nil { if f.SortingFunc == nil {
@ -157,13 +164,19 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
} else { } else {
for _, key := range fixedKeys { for _, key := range fixedKeys {
var value interface{} var value interface{}
switch key { switch {
case f.FieldMap.resolve(FieldKeyTime): case key == f.FieldMap.resolve(FieldKeyTime):
value = entry.Time.Format(timestampFormat) value = entry.Time.Format(timestampFormat)
case f.FieldMap.resolve(FieldKeyLevel): case key == f.FieldMap.resolve(FieldKeyLevel):
value = entry.Level.String() value = entry.Level.String()
case f.FieldMap.resolve(FieldKeyMsg): case key == f.FieldMap.resolve(FieldKeyMsg):
value = entry.Message value = entry.Message
case key == f.FieldMap.resolve(FieldKeyLogrusError):
value = entry.err
case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
value = entry.Caller.Function
case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
default: default:
value = entry.Data[key] value = entry.Data[key]
} }
@ -178,7 +191,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int var levelColor int
switch entry.Level { switch entry.Level {
case DebugLevel: case DebugLevel, TraceLevel:
levelColor = gray levelColor = gray
case WarnLevel: case WarnLevel:
levelColor = yellow levelColor = yellow
@ -197,12 +210,19 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
// the behavior of logrus text_formatter the same as the stdlib log package // the behavior of logrus text_formatter the same as the stdlib log package
entry.Message = strings.TrimSuffix(entry.Message, "\n") entry.Message = strings.TrimSuffix(entry.Message, "\n")
caller := ""
if entry.HasCaller() {
caller = fmt.Sprintf("%s:%d %s()",
entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
}
if f.DisableTimestamp { if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
} else if !f.FullTimestamp { } else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
} else { } else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
} }
for _, k := range keys { for _, k := range keys {
v := entry.Data[k] v := entry.Data[k]

View file

@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
var printFunc func(args ...interface{}) var printFunc func(args ...interface{})
switch level { switch level {
case TraceLevel:
printFunc = entry.Trace
case DebugLevel: case DebugLevel:
printFunc = entry.Debug printFunc = entry.Debug
case InfoLevel: case InfoLevel:

View file

@ -0,0 +1,36 @@
# Gradle
.gradle
build
gen_gradle
protobuf-gradle-plugin.i*
gradle.properties
local.properties
# Bazel
bazel-*
# Maven
target
# IntelliJ IDEA
.idea
*.iml
# Eclipse
.classpath
.project
.settings
bin
# OS X
.DS_Store
# Emacs
*~
\#*\#
# VS Code
.vscode
# Other
TAGS

View file

@ -0,0 +1,49 @@
sudo: required
dist: trusty
matrix:
include:
- language: java
jdk: oraclejdk8
env: BUILD=BAZEL
- language: java
jdk: oraclejdk8
env: BUILD=GRADLE
before_install:
- case "$BUILD" in
"BAZEL")
echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list ;
curl https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg | sudo apt-key add - ;
sudo apt-get update ;;
esac
install:
- case "$BUILD" in
"BAZEL")
sudo apt-get install bazel ;
bazel version;;
esac
script:
- case "$BUILD" in
"BAZEL")
cd src;
bazel build --show_result=100 ... ;;
"GRADLE")
./gradlew clean assemble --stacktrace ;
./gradlew check --stacktrace ;;
*)
echo "Missing case $BUILD" ;
exit 1 ;;
esac
before_cache:
- rm -f $HOME/.gradle/caches/modules-2/modules-2.lock
cache:
directories:
- $HOME/.gradle
- $HOME/.gradle/caches/
- $HOME/.gradle/wrapper/

View file

@ -0,0 +1 @@
Google Inc.

View file

@ -0,0 +1,26 @@
# How to submit a bug report
If you received an error message, please include it and any exceptions.
We commonly need to know which language you are using (e.g. Java) and what
platform you are on:
* Operating system (i.e., ```uname -a```)
# How to contribute
We definitely welcome patches and contributions to Census! Here are some
guideline and information about how to do so.
## Before getting started
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
We follow the [Google Proto Style Guide](
https://developers.google.com/protocol-buffers/docs/style).
## Proposing changes
Make sure that `bazel build :all` completes successfully without any new warnings.
Then create a Pull Request with your changes. When the changes are accepted, they
will be merged or cherry-picked by a Census core developer.

View file

@ -0,0 +1,80 @@
OpenCensus Proto - Language Independent Interface Types For OpenCensus
===============================================================
[![Build Status][travis-image]][travis-url]
[![Maven Central][maven-image]][maven-url]
Census provides a framework to define and collect stats against metrics and to
break those stats down across user-defined dimensions.
The Census framework is natively available in many languages (e.g. C++, Go,
and Java). The API interface types are defined using protos to ensure
consistency and interoperability for the different implementations.
## Quickstart
### Install to Go
```bash
$ go get -u github.com/census-instrumentation/opencensus-proto
```
In most cases you should depend on the gen-go files directly. If you are
building with Bazel, there are also go_proto_library build rules available.
See [PR/132](https://github.com/census-instrumentation/opencensus-proto/pull/132)
for details. However, please note that Bazel doesn't generate the final
artifacts.
### Add the dependencies to your Java project
For Maven add to `pom.xml`:
```xml
<dependency>
<groupId>io.opencensus</groupId>
<artifactId>opencensus-proto</artifactId>
<version>0.0.2</version>
</dependency>
```
For Gradle add to dependencies:
```gradle
compile 'io.opencensus:opencensus-proto:0.0.2'
```
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-proto.svg?branch=master
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-proto
[maven-image]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto/badge.svg
[maven-url]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto
### Add the dependencies to Bazel project
In WORKSPACE, add:
```
git_repository(
name = "io_opencensus_proto",
strip_prefix = "src",
tag = "v0.0.2", # CURRENT_OPENCENSUS_PROTO_VERSION
remote = "https://github.com/census-instrumentation/opencensus-proto",
)
```
or
```
http_archive(
name = "io_opencensus_proto",
strip_prefix = "opencensus-proto-master/src",
urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/master.zip"],
)
```
In BUILD.bazel:
```bazel
proto_library(
name = "foo_proto",
srcs = ["foo.proto"],
deps = [
"@io_opencensus_proto//opencensus/proto/metrics/v1:metrics_proto",
"@io_opencensus_proto//opencensus/proto/trace/v1:trace_proto",
# etc.
],
)
```

View file

@ -0,0 +1,190 @@
# How to Create a Release of OpenCensus Proto (for Maintainers Only)
## Build Environments
We re-generate gen-go files and deploy jars to Maven Central under the following systems:
- Ubuntu 14.04
Other systems may also work, but we haven't verified them.
## Release Go files
To generate the Go files from protos, you'll need to install protoc and protoc-gen-go plugin first.
Follow the instructions [here](http://google.github.io/proto-lens/installing-protoc.html) and
[here](https://github.com/golang/protobuf#installation).
Then run the following commands to re-generate the gen-go files:
```bash
$ cd $(go env GOPATH)/src/github.com/census-instrumentation/opencensus-proto
$ git checkout -b update-gen-go
$ rm -rf gen-go
$ cd src
$ ./mkgogen.sh
$ git add -A
$ git commit -m "Update gen-go files."
```
Go through PR review and merge the changes to GitHub.
## Tagging the Release
Our release branches follow the naming convention of `v<major>.<minor>.x`, while the tags include the
patch version `v<major>.<minor>.<patch>`. For example, the same branch `v0.4.x` would be used to create
all `v0.4` tags (e.g. `v0.4.0`, `v0.4.1`).
In this section upstream repository refers to the main opencensus-proto github
repository.
Before any push to the upstream repository you need to create a [personal access
token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/).
1. Create the release branch and push it to GitHub:
```bash
$ MAJOR=0 MINOR=4 PATCH=0 # Set appropriately for new release
$ VERSION_FILES=(
build.gradle
pom.xml
)
$ git checkout -b v$MAJOR.$MINOR.x master
$ git push upstream v$MAJOR.$MINOR.x
```
2. Enable branch protection for the new branch, if you have admin access.
Otherwise, let someone with admin access know that there is a new release
branch.
- Open the branch protection settings for the new branch, by following
[Github's instructions](https://help.github.com/articles/configuring-protected-branches/).
- Copy the settings from a previous branch, i.e., check
- `Protect this branch`
- `Require pull request reviews before merging`
- `Require status checks to pass before merging`
- `Include administrators`
Enable the following required status checks:
- `cla/google`
- `continuous-integration/travis-ci`
- Uncheck everything else.
- Click "Save changes".
3. For `master` branch:
- Change root build files to the next minor snapshot (e.g.
`0.5.0-SNAPSHOT`).
```bash
$ git checkout -b bump-version master
# Change version to next minor (and keep -SNAPSHOT)
$ sed -i 's/[0-9]\+\.[0-9]\+\.[0-9]\+\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/'$MAJOR.$((MINOR+1)).0'\1/' \
"${VERSION_FILES[@]}"
$ ./gradlew build
$ git commit -a -m "Start $MAJOR.$((MINOR+1)).0 development cycle"
```
- Go through PR review and push the master branch to GitHub:
```bash
$ git checkout master
$ git merge --ff-only bump-version
$ git push upstream master
```
4. For `vMajor.Minor.x` branch:
- Change root build files to remove "-SNAPSHOT" for the next release
version (e.g. `0.4.0`). Commit the result and make a tag:
```bash
$ git checkout -b release v$MAJOR.$MINOR.x
# Change version to remove -SNAPSHOT
$ sed -i 's/-SNAPSHOT\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/\1/' "${VERSION_FILES[@]}"
$ ./gradlew build
$ git commit -a -m "Bump version to $MAJOR.$MINOR.$PATCH"
$ git tag -a v$MAJOR.$MINOR.$PATCH -m "Version $MAJOR.$MINOR.$PATCH"
```
- Change root build files to the next snapshot version (e.g.
`0.4.1-SNAPSHOT`). Commit the result:
```bash
# Change version to next patch and add -SNAPSHOT
$ sed -i 's/[0-9]\+\.[0-9]\+\.[0-9]\+\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/'$MAJOR.$MINOR.$((PATCH+1))-SNAPSHOT'\1/' \
"${VERSION_FILES[@]}"
$ ./gradlew build
$ git commit -a -m "Bump version to $MAJOR.$MINOR.$((PATCH+1))-SNAPSHOT"
```
- Go through PR review and push the release tag and updated release branch
to GitHub:
```bash
$ git checkout v$MAJOR.$MINOR.x
$ git merge --ff-only release
$ git push upstream v$MAJOR.$MINOR.$PATCH
$ git push upstream v$MAJOR.$MINOR.x
```
## Release Java Jar
Deployment to Maven Central (or the snapshot repo) is for all of the artifacts
from the project.
### Prerequisites
If you haven't done already, please follow the instructions
[here](https://github.com/census-instrumentation/opencensus-java/blob/master/RELEASING.md#prerequisites)
to set up the OSSRH (OSS Repository Hosting) account and signing keys. This is required for releasing
to Maven Central.
### Branch
Before building/deploying, be sure to switch to the appropriate tag. The tag
must reference a commit that has been pushed to the main repository, i.e., has
gone through code review. For the current release use:
```bash
$ git checkout -b v$MAJOR.$MINOR.$PATCH tags/v$MAJOR.$MINOR.$PATCH
```
### Initial Deployment
The following command will build the whole project and upload it to Maven
Central. Parallel building [is not safe during
uploadArchives](https://issues.gradle.org/browse/GRADLE-3420).
```bash
$ ./gradlew clean build && ./gradlew -Dorg.gradle.parallel=false uploadArchives
```
If the version has the `-SNAPSHOT` suffix, the artifacts will automatically go
to the snapshot repository. Otherwise it's a release deployment and the
artifacts will go to a staging repository.
When deploying a Release, the deployment will create [a new staging
repository](https://oss.sonatype.org/#stagingRepositories). You'll need to look
up the ID in the OSSRH UI (usually in the form of `opencensus-*`).
### Releasing on Maven Central
Once all of the artifacts have been pushed to the staging repository, the
repository must first be `closed`, which will trigger several sanity checks on
the repository. If this completes successfully, the repository can then be
`released`, which will begin the process of pushing the new artifacts to Maven
Central (the staging repository will be destroyed in the process). You can see
the complete process for releasing to Maven Central on the [OSSRH
site](http://central.sonatype.org/pages/releasing-the-deployment.html).
## Announcement
Once deployment is done, go to Github [release
page](https://github.com/census-instrumentation/opencensus-proto/releases), press
`Draft a new release` to write release notes about the new release.
You can use `git log upstream/v$MAJOR.$((MINOR-1)).x..upstream/v$MAJOR.$MINOR.x --graph --first-parent`
or the Github [compare tool](https://github.com/census-instrumentation/opencensus-proto/compare/)
to view a summary of all commits since last release as a reference.
Please pick major or important user-visible changes only.

View file

@ -0,0 +1,170 @@
description = 'Opencensus Proto'
apply plugin: 'idea'
apply plugin: 'java'
apply plugin: 'com.google.protobuf'
apply plugin: 'maven'
apply plugin: "signing"
group = "io.opencensus"
version = "0.1.0" // CURRENT_OPENCENSUS_PROTO_VERSION
sourceCompatibility = 1.6
targetCompatibility = 1.6
repositories {
maven { url "https://plugins.gradle.org/m2/" }
}
jar.manifest {
attributes('Implementation-Title': name,
'Implementation-Version': version,
'Built-By': System.getProperty('user.name'),
'Built-JDK': System.getProperty('java.version'),
'Source-Compatibility': sourceCompatibility,
'Target-Compatibility': targetCompatibility)
}
def protobufVersion = '3.5.1'
def protocVersion = '3.5.1'
def grpcVersion = "1.14.0" // CURRENT_GRPC_VERSION
buildscript {
repositories {
maven { url "https://plugins.gradle.org/m2/" }
}
dependencies {
classpath "com.google.protobuf:protobuf-gradle-plugin:0.8.6"
}
}
sourceSets {
main {
proto {
srcDir 'src'
}
}
}
dependencies {
compile "com.google.protobuf:protobuf-java:${protobufVersion}",
"io.grpc:grpc-protobuf:${grpcVersion}",
"io.grpc:grpc-stub:${grpcVersion}"
compileOnly "javax.annotation:javax.annotation-api:1.2"
}
protobuf {
protoc {
// The artifact spec for the Protobuf Compiler
artifact = "com.google.protobuf:protoc:${protocVersion}"
}
plugins {
grpc {
artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}"
}
}
generateProtoTasks {
all()*.plugins {
grpc {}
}
ofSourceSet('main')
}
generatedFilesBaseDir = "$projectDir/gen_gradle/src"
}
// Disable all java warnings for proto generated files build
compileJava {
options.compilerArgs += ["-Xlint:none"]
options.encoding = "UTF-8"
}
clean {
delete protobuf.generatedFilesBaseDir
}
// IntelliJ complains that the generated classes are not found, ask IntelliJ to include the
// generated Java directories as source folders.
idea {
module {
sourceDirs += file("${protobuf.generatedFilesBaseDir}/main/java");
// If you have additional sourceSets and/or codegen plugins, add all of them
}
}
signing {
required false
sign configurations.archives
}
javadoc.source = "$projectDir/gen_gradle/src"
javadoc.options {
encoding = 'UTF-8'
links 'https://docs.oracle.com/javase/8/docs/api/'
}
task javadocJar(type: Jar) {
classifier = 'javadoc'
from javadoc
}
task sourcesJar(type: Jar) {
classifier = 'sources'
from sourceSets.main.allSource
}
artifacts {
archives javadocJar, sourcesJar
}
uploadArchives {
repositories {
mavenDeployer {
beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }
def configureAuth = {
if (rootProject.hasProperty('ossrhUsername') && rootProject.hasProperty('ossrhPassword')) {
authentication(userName:rootProject.ossrhUsername, password: rootProject.ossrhPassword)
}
}
repository(url: "https://oss.sonatype.org/service/local/staging/deploy/maven2/", configureAuth)
snapshotRepository(url: "https://oss.sonatype.org/content/repositories/snapshots/", configureAuth)
pom.project {
name "OpenCensus"
packaging 'jar'
description project.description
url 'https://github.com/census-instrumentation/opencensus-proto'
scm {
connection 'scm:svn:https://github.com/census-instrumentation/opencensus-proto'
developerConnection 'scm:git:git@github.com/census-instrumentation/opencensus-proto'
url 'https://github.com/census-instrumentation/opencensus-proto'
}
licenses {
license {
name 'The Apache License, Version 2.0'
url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
}
}
developers {
developer {
id 'io.opencensus'
name 'OpenCensus Contributors'
email 'census-developers@googlegroups.com'
url 'opencensus.io'
// https://issues.gradle.org/browse/GRADLE-2719
organization = 'OpenCensus Authors'
organizationUrl 'https://www.opencensus.io'
}
}
}
}
}
}

View file

@ -0,0 +1,356 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/agent/common/v1/common.proto
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type LibraryInfo_Language int32
const (
LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
LibraryInfo_CPP LibraryInfo_Language = 1
LibraryInfo_C_SHARP LibraryInfo_Language = 2
LibraryInfo_ERLANG LibraryInfo_Language = 3
LibraryInfo_GO_LANG LibraryInfo_Language = 4
LibraryInfo_JAVA LibraryInfo_Language = 5
LibraryInfo_NODE_JS LibraryInfo_Language = 6
LibraryInfo_PHP LibraryInfo_Language = 7
LibraryInfo_PYTHON LibraryInfo_Language = 8
LibraryInfo_RUBY LibraryInfo_Language = 9
)
var LibraryInfo_Language_name = map[int32]string{
0: "LANGUAGE_UNSPECIFIED",
1: "CPP",
2: "C_SHARP",
3: "ERLANG",
4: "GO_LANG",
5: "JAVA",
6: "NODE_JS",
7: "PHP",
8: "PYTHON",
9: "RUBY",
}
var LibraryInfo_Language_value = map[string]int32{
"LANGUAGE_UNSPECIFIED": 0,
"CPP": 1,
"C_SHARP": 2,
"ERLANG": 3,
"GO_LANG": 4,
"JAVA": 5,
"NODE_JS": 6,
"PHP": 7,
"PYTHON": 8,
"RUBY": 9,
}
func (x LibraryInfo_Language) String() string {
return proto.EnumName(LibraryInfo_Language_name, int32(x))
}
func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_126c72ed8a252c84, []int{2, 0}
}
// Identifier metadata of the Node (Application instrumented with OpenCensus)
// that connects to OpenCensus Agent.
// In the future we plan to extend the identifier proto definition to support
// additional information (e.g cloud id, etc.)
type Node struct {
    // Identifier that uniquely identifies a process within a VM/container.
    Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
    // Information on the OpenCensus Library that initiates the stream.
    LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
    // Additional information on service.
    ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
    // Additional attributes.
    Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *Node) Reset() { *m = Node{} }

// String renders the message in the proto compact text format.
func (m *Node) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks Node as a protobuf message.
func (*Node) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*Node) Descriptor() ([]byte, []int) {
    return fileDescriptor_126c72ed8a252c84, []int{0}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they all delegate to xxx_messageInfo_Node and are not
// intended to be called directly by application code.
func (m *Node) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Node.Unmarshal(m, b)
}
func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Node.Marshal(b, m, deterministic)
}
func (m *Node) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Node.Merge(m, src)
}
func (m *Node) XXX_Size() int {
    return xxx_messageInfo_Node.Size(m)
}
func (m *Node) XXX_DiscardUnknown() {
    xxx_messageInfo_Node.DiscardUnknown(m)
}

var xxx_messageInfo_Node proto.InternalMessageInfo

// GetIdentifier is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *Node) GetIdentifier() *ProcessIdentifier {
    if m != nil {
        return m.Identifier
    }
    return nil
}

// GetLibraryInfo is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *Node) GetLibraryInfo() *LibraryInfo {
    if m != nil {
        return m.LibraryInfo
    }
    return nil
}

// GetServiceInfo is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *Node) GetServiceInfo() *ServiceInfo {
    if m != nil {
        return m.ServiceInfo
    }
    return nil
}

// GetAttributes is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *Node) GetAttributes() map[string]string {
    if m != nil {
        return m.Attributes
    }
    return nil
}
// Identifier that uniquely identifies a process within a VM/container.
type ProcessIdentifier struct {
    // The host name. Usually refers to the machine/container name.
    // For example: os.Hostname() in Go, socket.gethostname() in Python.
    HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
    // Process id.
    Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
    // Start time of this ProcessIdentifier. Represented in epoch time.
    StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }

// String renders the message in the proto compact text format.
func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ProcessIdentifier as a protobuf message.
func (*ProcessIdentifier) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
    return fileDescriptor_126c72ed8a252c84, []int{1}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_ProcessIdentifier.
func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
}
func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
}
func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ProcessIdentifier.Merge(m, src)
}
func (m *ProcessIdentifier) XXX_Size() int {
    return xxx_messageInfo_ProcessIdentifier.Size(m)
}
func (m *ProcessIdentifier) XXX_DiscardUnknown() {
    xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
}

var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo

// GetHostName is a nil-safe accessor: it returns "" when the receiver is nil.
func (m *ProcessIdentifier) GetHostName() string {
    if m != nil {
        return m.HostName
    }
    return ""
}

// GetPid is a nil-safe accessor: it returns 0 when the receiver is nil.
func (m *ProcessIdentifier) GetPid() uint32 {
    if m != nil {
        return m.Pid
    }
    return 0
}

// GetStartTimestamp is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
    if m != nil {
        return m.StartTimestamp
    }
    return nil
}
// Information on OpenCensus Library.
type LibraryInfo struct {
    // Language of OpenCensus Library.
    Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
    // Version of Agent exporter of Library.
    ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
    // Version of OpenCensus Library.
    CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }

// String renders the message in the proto compact text format.
func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks LibraryInfo as a protobuf message.
func (*LibraryInfo) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*LibraryInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_126c72ed8a252c84, []int{2}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_LibraryInfo.
func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
}
func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
}
func (m *LibraryInfo) XXX_Merge(src proto.Message) {
    xxx_messageInfo_LibraryInfo.Merge(m, src)
}
func (m *LibraryInfo) XXX_Size() int {
    return xxx_messageInfo_LibraryInfo.Size(m)
}
func (m *LibraryInfo) XXX_DiscardUnknown() {
    xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
}

var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo

// GetLanguage is a nil-safe accessor: it returns LANGUAGE_UNSPECIFIED when
// the receiver is nil.
func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
    if m != nil {
        return m.Language
    }
    return LibraryInfo_LANGUAGE_UNSPECIFIED
}

// GetExporterVersion is a nil-safe accessor: it returns "" when the receiver is nil.
func (m *LibraryInfo) GetExporterVersion() string {
    if m != nil {
        return m.ExporterVersion
    }
    return ""
}

// GetCoreLibraryVersion is a nil-safe accessor: it returns "" when the receiver is nil.
func (m *LibraryInfo) GetCoreLibraryVersion() string {
    if m != nil {
        return m.CoreLibraryVersion
    }
    return ""
}
// Additional service information.
type ServiceInfo struct {
    // Name of the service.
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }

// String renders the message in the proto compact text format.
func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ServiceInfo as a protobuf message.
func (*ServiceInfo) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*ServiceInfo) Descriptor() ([]byte, []int) {
    return fileDescriptor_126c72ed8a252c84, []int{3}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_ServiceInfo.
func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
}
func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
}
func (m *ServiceInfo) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ServiceInfo.Merge(m, src)
}
func (m *ServiceInfo) XXX_Size() int {
    return xxx_messageInfo_ServiceInfo.Size(m)
}
func (m *ServiceInfo) XXX_DiscardUnknown() {
    xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
}

var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo

// GetName is a nil-safe accessor: it returns "" when the receiver is nil.
func (m *ServiceInfo) GetName() string {
    if m != nil {
        return m.Name
    }
    return ""
}
// init registers this file's enum, message, and map types with the proto
// runtime so they can be resolved by their fully-qualified proto names.
func init() {
    proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
    proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
    proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
    proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
    proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
    proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
}

// init registers the compressed file descriptor under its canonical
// proto import path.
func init() {
    proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
}

// fileDescriptor_126c72ed8a252c84 is the gzip-compressed, serialized
// FileDescriptorProto for common.proto. It is opaque binary data consumed
// by the proto runtime and must not be edited by hand.
var fileDescriptor_126c72ed8a252c84 = []byte{
    // 590 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e,
    0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee,
    0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01,
    0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde,
    0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9,
    0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2,
    0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89,
    0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9,
    0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7,
    0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39,
    0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b,
    0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b,
    0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13,
    0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06,
    0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d,
    0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67,
    0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2,
    0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a,
    0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a,
    0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76,
    0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23,
    0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c,
    0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c,
    0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92,
    0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e,
    0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51,
    0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14,
    0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83,
    0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0,
    0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86,
    0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4,
    0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd,
    0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9,
    0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c,
    0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70,
    0x85, 0xec, 0xd8, 0x9f, 0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1,
    0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00,
}

[New file in this diff: +443 lines — generated Go source for
opencensus/proto/agent/trace/v1/trace_service.proto (trace_service.pb.go).]
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/agent/trace/v1/trace_service.proto
package v1
import (
fmt "fmt"
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
proto "github.com/golang/protobuf/proto"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// CurrentLibraryConfig is sent by the instrumented application on the
// bidirectional Config stream to report its current trace configuration.
type CurrentLibraryConfig struct {
    // This is required only in the first message on the stream or if the
    // previous sent CurrentLibraryConfig message has a different Node (e.g.
    // when the same RPC is used to configure multiple Applications).
    Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
    // Current configuration.
    Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} }

// String renders the message in the proto compact text format.
func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks CurrentLibraryConfig as a protobuf message.
func (*CurrentLibraryConfig) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
    return fileDescriptor_7027f99caf7ac6a5, []int{0}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_CurrentLibraryConfig.
func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b)
}
func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic)
}
func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) {
    xxx_messageInfo_CurrentLibraryConfig.Merge(m, src)
}
func (m *CurrentLibraryConfig) XXX_Size() int {
    return xxx_messageInfo_CurrentLibraryConfig.Size(m)
}
func (m *CurrentLibraryConfig) XXX_DiscardUnknown() {
    xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m)
}

var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo

// GetNode is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *CurrentLibraryConfig) GetNode() *v1.Node {
    if m != nil {
        return m.Node
    }
    return nil
}

// GetConfig is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
    if m != nil {
        return m.Config
    }
    return nil
}
// UpdatedLibraryConfig is sent by the agent on the bidirectional Config
// stream to push a new trace configuration down to the application.
type UpdatedLibraryConfig struct {
    // This field is ignored when the RPC is used to configure only one Application.
    // This is required only in the first message on the stream or if the
    // previous sent UpdatedLibraryConfig message has a different Node.
    Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
    // Requested updated configuration.
    Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} }

// String renders the message in the proto compact text format.
func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks UpdatedLibraryConfig as a protobuf message.
func (*UpdatedLibraryConfig) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
    return fileDescriptor_7027f99caf7ac6a5, []int{1}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_UpdatedLibraryConfig.
func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b)
}
func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic)
}
func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) {
    xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src)
}
func (m *UpdatedLibraryConfig) XXX_Size() int {
    return xxx_messageInfo_UpdatedLibraryConfig.Size(m)
}
func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() {
    xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m)
}

var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo

// GetNode is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *UpdatedLibraryConfig) GetNode() *v1.Node {
    if m != nil {
        return m.Node
    }
    return nil
}

// GetConfig is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
    if m != nil {
        return m.Config
    }
    return nil
}
// ExportTraceServiceRequest carries a batch of spans (and optionally the
// originating Node and Resource) on the Export stream.
type ExportTraceServiceRequest struct {
    // This is required only in the first message on the stream or if the
    // previous sent ExportTraceServiceRequest message has a different Node (e.g.
    // when the same RPC is used to send Spans from multiple Applications).
    Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
    // A list of Spans that belong to the last received Node.
    Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
    // The resource for the spans in this message that do not have an explicit
    // resource set.
    // If unset, the most recently set resource in the RPC stream applies. It is
    // valid to never be set within a stream, e.g. when no resource info is known.
    Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }

// String renders the message in the proto compact text format.
func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ExportTraceServiceRequest as a protobuf message.
func (*ExportTraceServiceRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
    return fileDescriptor_7027f99caf7ac6a5, []int{2}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_ExportTraceServiceRequest.
func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b)
}
func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
}
func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
}
func (m *ExportTraceServiceRequest) XXX_Size() int {
    return xxx_messageInfo_ExportTraceServiceRequest.Size(m)
}
func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
    xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo

// GetNode is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *ExportTraceServiceRequest) GetNode() *v1.Node {
    if m != nil {
        return m.Node
    }
    return nil
}

// GetSpans is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span {
    if m != nil {
        return m.Spans
    }
    return nil
}

// GetResource is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *ExportTraceServiceRequest) GetResource() *v12.Resource {
    if m != nil {
        return m.Resource
    }
    return nil
}
// ExportTraceServiceResponse is the (currently empty) reply sent by the
// agent on the Export stream.
type ExportTraceServiceResponse struct {
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }

// String renders the message in the proto compact text format.
func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ExportTraceServiceResponse as a protobuf message.
func (*ExportTraceServiceResponse) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
    return fileDescriptor_7027f99caf7ac6a5, []int{3}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_ExportTraceServiceResponse.
func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b)
}
func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
}
func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
}
func (m *ExportTraceServiceResponse) XXX_Size() int {
    return xxx_messageInfo_ExportTraceServiceResponse.Size(m)
}
func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
    xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
// init registers this file's message types with the proto runtime so they
// can be resolved by their fully-qualified proto names.
func init() {
    proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig")
    proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig")
    proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest")
    proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse")
}

// init registers the compressed file descriptor under its canonical
// proto import path.
func init() {
    proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5)
}

// fileDescriptor_7027f99caf7ac6a5 is the gzip-compressed, serialized
// FileDescriptorProto for trace_service.proto. It is opaque binary data
// consumed by the proto runtime and must not be edited by hand.
var fileDescriptor_7027f99caf7ac6a5 = []byte{
    // 423 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40,
    0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a,
    0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8,
    0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3,
    0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d,
    0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49,
    0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91,
    0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d,
    0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7,
    0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf,
    0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73,
    0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1,
    0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31,
    0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95,
    0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d,
    0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90,
    0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d,
    0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d,
    0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89,
    0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44,
    0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e,
    0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae,
    0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59,
    0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94,
    0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00,
    0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf,
    0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// TraceServiceClient is the client API for TraceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type TraceServiceClient interface {
    // After initialization, this RPC must be kept alive for the entire life of
    // the application. The agent pushes configs down to applications via a
    // stream.
    Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
    // For performance reasons, it is recommended to keep this RPC
    // alive for the entire life of the application.
    Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
}

// traceServiceClient is the concrete TraceServiceClient backed by a
// *grpc.ClientConn.
type traceServiceClient struct {
    cc *grpc.ClientConn
}

// NewTraceServiceClient wraps an existing connection in a TraceServiceClient.
func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
    return &traceServiceClient{cc}
}

// Config opens the bidirectional Config stream (stream 0 of the service
// descriptor) and returns a typed wrapper around it.
func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
    stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
    if err != nil {
        return nil, err
    }
    x := &traceServiceConfigClient{stream}
    return x, nil
}

// TraceService_ConfigClient is the typed client view of the Config stream:
// the client sends CurrentLibraryConfig and receives UpdatedLibraryConfig.
type TraceService_ConfigClient interface {
    Send(*CurrentLibraryConfig) error
    Recv() (*UpdatedLibraryConfig, error)
    grpc.ClientStream
}

type traceServiceConfigClient struct {
    grpc.ClientStream
}

// Send marshals m onto the underlying client stream.
func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
    return x.ClientStream.SendMsg(m)
}

// Recv blocks for the next UpdatedLibraryConfig from the agent.
func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
    m := new(UpdatedLibraryConfig)
    if err := x.ClientStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

// Export opens the bidirectional Export stream (stream 1 of the service
// descriptor) and returns a typed wrapper around it.
func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
    stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
    if err != nil {
        return nil, err
    }
    x := &traceServiceExportClient{stream}
    return x, nil
}

// TraceService_ExportClient is the typed client view of the Export stream:
// the client sends span batches and receives (empty) acknowledgement messages.
type TraceService_ExportClient interface {
    Send(*ExportTraceServiceRequest) error
    Recv() (*ExportTraceServiceResponse, error)
    grpc.ClientStream
}

type traceServiceExportClient struct {
    grpc.ClientStream
}

// Send marshals m onto the underlying client stream.
func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
    return x.ClientStream.SendMsg(m)
}

// Recv blocks for the next ExportTraceServiceResponse from the agent.
func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
    m := new(ExportTraceServiceResponse)
    if err := x.ClientStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}
// TraceServiceServer is the server API for TraceService service.
type TraceServiceServer interface {
    // After initialization, this RPC must be kept alive for the entire life of
    // the application. The agent pushes configs down to applications via a
    // stream.
    Config(TraceService_ConfigServer) error
    // For performance reasons, it is recommended to keep this RPC
    // alive for the entire life of the application.
    Export(TraceService_ExportServer) error
}

// RegisterTraceServiceServer registers srv's handlers with the given gRPC
// server under the TraceService service descriptor.
func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
    s.RegisterService(&_TraceService_serviceDesc, srv)
}

// _TraceService_Config_Handler adapts a raw grpc.ServerStream to the typed
// Config handler on the registered TraceServiceServer.
func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
    return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
}

// TraceService_ConfigServer is the typed server view of the Config stream:
// the server sends UpdatedLibraryConfig and receives CurrentLibraryConfig.
type TraceService_ConfigServer interface {
    Send(*UpdatedLibraryConfig) error
    Recv() (*CurrentLibraryConfig, error)
    grpc.ServerStream
}

type traceServiceConfigServer struct {
    grpc.ServerStream
}

// Send marshals m onto the underlying server stream.
func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
    return x.ServerStream.SendMsg(m)
}

// Recv blocks for the next CurrentLibraryConfig from the client.
func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
    m := new(CurrentLibraryConfig)
    if err := x.ServerStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

// _TraceService_Export_Handler adapts a raw grpc.ServerStream to the typed
// Export handler on the registered TraceServiceServer.
func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
    return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
}

// TraceService_ExportServer is the typed server view of the Export stream:
// the server sends (empty) responses and receives span batches.
type TraceService_ExportServer interface {
    Send(*ExportTraceServiceResponse) error
    Recv() (*ExportTraceServiceRequest, error)
    grpc.ServerStream
}

type traceServiceExportServer struct {
    grpc.ServerStream
}

// Send marshals m onto the underlying server stream.
func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
    return x.ServerStream.SendMsg(m)
}

// Recv blocks for the next ExportTraceServiceRequest from the client.
func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
    m := new(ExportTraceServiceRequest)
    if err := x.ServerStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}
// _TraceService_serviceDesc describes TraceService to the gRPC runtime: it
// has no unary methods, only two bidirectional streams (Config and Export,
// both with ServerStreams and ClientStreams set).
var _TraceService_serviceDesc = grpc.ServiceDesc{
    ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
    HandlerType: (*TraceServiceServer)(nil),
    Methods: []grpc.MethodDesc{},
    Streams: []grpc.StreamDesc{
        {
            StreamName: "Config",
            Handler: _TraceService_Config_Handler,
            ServerStreams: true,
            ClientStreams: true,
        },
        {
            StreamName: "Export",
            Handler: _TraceService_Export_Handler,
            ServerStreams: true,
            ClientStreams: true,
        },
    },
    Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
}

[New file in this diff: +99 lines — generated Go source for
opencensus/proto/resource/v1/resource.proto (resource.pb.go).]
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/resource/v1/resource.proto
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// Resource information.
type Resource struct {
    // Type identifier for the resource.
    Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
    // Set of labels that describe the resource.
    Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    // XXX_* fields are internal bookkeeping for the proto runtime.
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

// Reset clears the message to its zero value.
func (m *Resource) Reset() { *m = Resource{} }

// String renders the message in the proto compact text format.
func (m *Resource) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks Resource as a protobuf message.
func (*Resource) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the index of this
// message within it.
func (*Resource) Descriptor() ([]byte, []int) {
    return fileDescriptor_584700775a2fc762, []int{0}
}

// The XXX_* methods below implement the table-driven marshaling API used by
// the proto runtime; they delegate to xxx_messageInfo_Resource.
func (m *Resource) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Resource.Unmarshal(m, b)
}
func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
}
func (m *Resource) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Resource.Merge(m, src)
}
func (m *Resource) XXX_Size() int {
    return xxx_messageInfo_Resource.Size(m)
}
func (m *Resource) XXX_DiscardUnknown() {
    xxx_messageInfo_Resource.DiscardUnknown(m)
}

var xxx_messageInfo_Resource proto.InternalMessageInfo

// GetType is a nil-safe accessor: it returns "" when the receiver is nil.
func (m *Resource) GetType() string {
    if m != nil {
        return m.Type
    }
    return ""
}

// GetLabels is a nil-safe accessor: it returns nil when the receiver is nil.
func (m *Resource) GetLabels() map[string]string {
    if m != nil {
        return m.Labels
    }
    return nil
}
// init registers the Resource message and its labels map type with the
// proto runtime so they can be resolved by their fully-qualified names.
func init() {
    proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
    proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
}

// init registers the compressed file descriptor under its canonical
// proto import path.
func init() {
    proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
}

// fileDescriptor_584700775a2fc762 is the gzip-compressed, serialized
// FileDescriptorProto for resource.proto. It is opaque binary data consumed
// by the proto runtime and must not be edited by hand.
var fileDescriptor_584700775a2fc762 = []byte{
    // 234 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
    0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
    0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
    0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
    0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
    0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
    0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
    0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
    0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
    0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf,
    0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19,
    0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5,
    0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99,
    0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00,
    0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00,
}

[Note: the diff rendering was truncated at this point because too many files changed; additional modified files are not shown here.]